Initialize
Some checks failed
Build wheels / build (ubuntu-latest, 3.11) (push) Has been cancelled
Build wheels / build (ubuntu-latest, 3.12) (push) Has been cancelled
Build wheels / build (ubuntu-latest, 3.13) (push) Has been cancelled
docs / evaluate-label (push) Has been cancelled
Tests / check (push) Has been cancelled
docs / deploy-docs (push) Has been cancelled
Tests / build (ubuntu-latest, 3.11) (push) Has been cancelled
Tests / build (ubuntu-latest, 3.12) (push) Has been cancelled
Tests / build (ubuntu-latest, 3.13) (push) Has been cancelled
Some checks failed
Build wheels / build (ubuntu-latest, 3.11) (push) Has been cancelled
Build wheels / build (ubuntu-latest, 3.12) (push) Has been cancelled
Build wheels / build (ubuntu-latest, 3.13) (push) Has been cancelled
docs / evaluate-label (push) Has been cancelled
Tests / check (push) Has been cancelled
docs / deploy-docs (push) Has been cancelled
Tests / build (ubuntu-latest, 3.11) (push) Has been cancelled
Tests / build (ubuntu-latest, 3.12) (push) Has been cancelled
Tests / build (ubuntu-latest, 3.13) (push) Has been cancelled
This commit is contained in:
9
.envrc
Normal file
9
.envrc
Normal file
@@ -0,0 +1,9 @@
|
||||
if ! has nix_direnv_version || ! nix_direnv_version 2.2.1; then
|
||||
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/2.2.1/direnvrc" "sha256-zelF0vLbEl5uaqrfIzbgNzJWGmLzCmYAkInj/LNxvKs="
|
||||
fi
|
||||
|
||||
watch_file flake.nix
|
||||
watch_file flake.lock
|
||||
if ! use flake . --impure; then
|
||||
echo "devenv could not be built. The devenv environment was not loaded. Make the necessary changes to devenv.nix and hit enter to try again." >&2
|
||||
fi
|
||||
22
.github/workflows/deploy.yml
vendored
Normal file
22
.github/workflows/deploy.yml
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
# A single CI script with github workflow
|
||||
name: Build wheels
|
||||
|
||||
on:
|
||||
push:
|
||||
release:
|
||||
types:
|
||||
- published
|
||||
|
||||
jobs:
|
||||
build:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
python-version: ["3.11", "3.12", "3.13"]
|
||||
uses: qiboteam/workflows/.github/workflows/deploy-pip-poetry.yml@v1
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
python-version: ${{ matrix.python-version }}
|
||||
publish: ${{ github.event_name == 'release' && github.event.action == 'published' && matrix.os == 'ubuntu-latest' && matrix.python-version == '3.9' }}
|
||||
poetry-extras: "--with docs,tests,analysis"
|
||||
secrets: inherit
|
||||
38
.github/workflows/publish.yml
vendored
Normal file
38
.github/workflows/publish.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
name: docs
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches: [main]
|
||||
tags:
|
||||
- "*"
|
||||
|
||||
jobs:
|
||||
evaluate-label:
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
label: ${{ steps.label_step.outputs.version}}
|
||||
steps:
|
||||
- name: checks for the label
|
||||
id: label_step
|
||||
run: |
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
echo "version=latest" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
if [[ "${{ github.ref_type }}" == "branch" ]] && [[ "${{ github.ref }}" != "refs/heads/main" ]]; then
|
||||
exit 1
|
||||
fi
|
||||
if [[ "${{ github.ref_type }}" == "tag" ]]; then
|
||||
echo "version=stable" >> $GITHUB_OUTPUT
|
||||
fi
|
||||
|
||||
deploy-docs:
|
||||
needs: [evaluate-label]
|
||||
uses: qiboteam/workflows/.github/workflows/deploy-ghpages-latest-stable.yml@v1
|
||||
with:
|
||||
python-version: "3.11"
|
||||
package-manager: "poetry"
|
||||
dependency-path: "**/poetry.lock"
|
||||
trigger-label: "${{needs.evaluate-label.outputs.label}}"
|
||||
project: qibotn
|
||||
poetry-extras: --with docs
|
||||
38
.github/workflows/rules.yml
vendored
Normal file
38
.github/workflows/rules.yml
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
# A single CI script with github workflow
|
||||
name: Tests
|
||||
|
||||
env:
|
||||
CUDA_PATH:
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
pull_request:
|
||||
types: [labeled]
|
||||
|
||||
jobs:
|
||||
check:
|
||||
# job to check cuda availability for local gpu host runners
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- id: step1
|
||||
run: echo "test=${{ env.CUDA_PATH != ''}}" >> "$GITHUB_OUTPUT"
|
||||
- id: step2
|
||||
run: echo "test=${{ contains(github.event.pull_request.labels.*.name, 'run-workflow') || github.event_name == 'push' }}" >> "$GITHUB_OUTPUT"
|
||||
outputs:
|
||||
cuda_avail: ${{ fromJSON(steps.step1.outputs.test) && fromJSON(steps.step2.outputs.test) }}
|
||||
|
||||
build:
|
||||
# job to build
|
||||
needs: check
|
||||
if: ${{fromJSON(needs.check.outputs.cuda_avail)}}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [ubuntu-latest]
|
||||
python-version: ["3.11", "3.12", "3.13"]
|
||||
uses: qiboteam/workflows/.github/workflows/rules-poetry.yml@v1
|
||||
with:
|
||||
os: ${{ matrix.os }}
|
||||
python-version: ${{ matrix.python-version }}
|
||||
poetry-extras: "--with analysis,tests"
|
||||
secrets: inherit
|
||||
162
.gitignore
vendored
Normal file
162
.gitignore
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/#use-with-ide
|
||||
.pdm.toml
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
.devenv
|
||||
29
.pre-commit-config.yaml
Normal file
29
.pre-commit-config.yaml
Normal file
@@ -0,0 +1,29 @@
|
||||
ci:
|
||||
autofix_prs: true
|
||||
repos:
|
||||
- repo: https://github.com/pre-commit/pre-commit-hooks
|
||||
rev: v6.0.0
|
||||
hooks:
|
||||
- id: trailing-whitespace
|
||||
- id: end-of-file-fixer
|
||||
- id: check-yaml
|
||||
- id: check-toml
|
||||
- id: debug-statements
|
||||
- repo: https://github.com/psf/black-pre-commit-mirror
|
||||
rev: 26.3.1
|
||||
hooks:
|
||||
- id: black
|
||||
- repo: https://github.com/pycqa/isort
|
||||
rev: 8.0.1
|
||||
hooks:
|
||||
- id: isort
|
||||
args: ["--profile", "black"]
|
||||
- repo: https://github.com/asottile/pyupgrade
|
||||
rev: v3.21.2
|
||||
hooks:
|
||||
- id: pyupgrade
|
||||
- repo: https://github.com/hadialqattan/pycln
|
||||
rev: v2.6.0
|
||||
hooks:
|
||||
- id: pycln
|
||||
args: [--config=pyproject.toml]
|
||||
201
LICENSE
Normal file
201
LICENSE
Normal file
@@ -0,0 +1,201 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2020 The QIBO team
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
128
README.md
Normal file
128
README.md
Normal file
@@ -0,0 +1,128 @@
|
||||
# Qibotn
|
||||
|
||||
The tensor network translation module for Qibo to support large-scale simulation of quantum circuits and acceleration.
|
||||
|
||||
## Supported Computation
|
||||
|
||||
Tensor Network Types:
|
||||
|
||||
- Tensornet (TN)
|
||||
- Matrix Product States (MPS)
|
||||
|
||||
Tensor Network contractions to:
|
||||
|
||||
- dense vectors
|
||||
- expecation values of given Pauli string
|
||||
|
||||
The supported HPC configurations are:
|
||||
|
||||
- single-node CPU
|
||||
- single-node GPU or GPUs
|
||||
- multi-node multi-GPU with Message Passing Interface (MPI)
|
||||
- multi-node multi-GPU with NVIDIA Collective Communications Library (NCCL)
|
||||
|
||||
Currently, the supported tensor network libraries are:
|
||||
|
||||
- [cuQuantum](https://github.com/NVIDIA/cuQuantum), an NVIDIA SDK of optimized libraries and tools for accelerating quantum computing workflows.
|
||||
- [quimb](https://quimb.readthedocs.io/en/latest/), an easy but fast python library for ‘quantum information many-body’ calculations, focusing primarily on tensor networks.
|
||||
|
||||
## Installation
|
||||
|
||||
To get started:
|
||||
|
||||
```sh
|
||||
pip install qibotn
|
||||
```
|
||||
|
||||
to install the tools and dependencies. A few extras are provided, check `pyproject.toml` in
|
||||
case you need them.
|
||||
|
||||
<!-- TODO: describe extras, after Poetry adoption and its groups -->
|
||||
|
||||
## Contribute
|
||||
|
||||
To contribute, please install using poetry:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/qiboteam/qibotn.git
|
||||
cd qibotn
|
||||
poetry install
|
||||
```
|
||||
|
||||
## Sample Codes
|
||||
|
||||
### Single-Node Example
|
||||
|
||||
The code below shows an example of how to activate the Cuquantum TensorNetwork backend of Qibo.
|
||||
|
||||
```py
|
||||
import numpy as np
|
||||
from qibo import Circuit, gates
|
||||
import qibo
|
||||
|
||||
# Below shows how to set the computation_settings
|
||||
# Note that for MPS_enabled and expectation_enabled parameters the accepted inputs are boolean or a dictionary with the format shown below.
|
||||
# If computation_settings is not specified, the default setting is used in which all booleans will be False.
|
||||
# This will trigger the dense vector computation of the tensornet.
|
||||
|
||||
computation_settings = {
|
||||
"MPI_enabled": False,
|
||||
"MPS_enabled": {
|
||||
"qr_method": False,
|
||||
"svd_method": {
|
||||
"partition": "UV",
|
||||
"abs_cutoff": 1e-12,
|
||||
},
|
||||
},
|
||||
"NCCL_enabled": False,
|
||||
"expectation_enabled": False,
|
||||
}
|
||||
|
||||
|
||||
qibo.set_backend(
|
||||
backend="qibotn", platform="cutensornet", runcard=computation_settings
|
||||
) # cuQuantum
|
||||
# qibo.set_backend(backend="qibotn", platform="qutensornet", runcard=computation_settings) #quimb
|
||||
|
||||
|
||||
# Construct the circuit
|
||||
c = Circuit(2)
|
||||
# Add some gates
|
||||
c.add(gates.H(0))
|
||||
c.add(gates.H(1))
|
||||
|
||||
# Execute the circuit and obtain the final state
|
||||
result = c()
|
||||
|
||||
print(result.state())
|
||||
```
|
||||
|
||||
Other examples of setting the computation_settings
|
||||
|
||||
```py
|
||||
# Expectation computation with specific Pauli String pattern
|
||||
computation_settings = {
|
||||
"MPI_enabled": False,
|
||||
"MPS_enabled": False,
|
||||
"NCCL_enabled": False,
|
||||
"expectation_enabled": {
|
||||
"pauli_string_pattern": "IXZ",
|
||||
},
|
||||
}
|
||||
|
||||
# Dense vector computation using multi node through MPI
|
||||
computation_settings = {
|
||||
"MPI_enabled": True,
|
||||
"MPS_enabled": False,
|
||||
"NCCL_enabled": False,
|
||||
"expectation_enabled": False,
|
||||
}
|
||||
```
|
||||
|
||||
### Multi-Node Example
|
||||
|
||||
Multi-node is enabled by setting either the MPI or NCCL enabled flag to True in the computation settings. Below shows the script to launch on 2 nodes with 2 GPUs each. $node_list contains the IP of the nodes assigned.
|
||||
|
||||
```sh
|
||||
mpirun -n 4 -hostfile $node_list python test.py
|
||||
```
|
||||
20
doc/Makefile
Normal file
20
doc/Makefile
Normal file
@@ -0,0 +1,20 @@
|
||||
# Minimal makefile for Sphinx documentation
|
||||
#
|
||||
|
||||
# You can set these variables from the command line, and also
|
||||
# from the environment for the first two.
|
||||
SPHINXOPTS ?=
|
||||
SPHINXBUILD ?= sphinx-build
|
||||
SOURCEDIR = source
|
||||
BUILDDIR = build
|
||||
|
||||
# Put it first so that "make" without argument is like "make help".
|
||||
help:
|
||||
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
|
||||
.PHONY: help Makefile
|
||||
|
||||
# Catch-all target: route all unknown targets to Sphinx using the new
|
||||
# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
|
||||
%: Makefile
|
||||
@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
|
||||
35
doc/make.bat
Normal file
35
doc/make.bat
Normal file
@@ -0,0 +1,35 @@
|
||||
@ECHO OFF
|
||||
|
||||
pushd %~dp0
|
||||
|
||||
REM Command file for Sphinx documentation
|
||||
|
||||
if "%SPHINXBUILD%" == "" (
|
||||
set SPHINXBUILD=sphinx-build
|
||||
)
|
||||
set SOURCEDIR=source
|
||||
set BUILDDIR=build
|
||||
|
||||
%SPHINXBUILD% >NUL 2>NUL
|
||||
if errorlevel 9009 (
|
||||
echo.
|
||||
echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
|
||||
echo.installed, then set the SPHINXBUILD environment variable to point
|
||||
echo.to the full path of the 'sphinx-build' executable. Alternatively you
|
||||
echo.may add the Sphinx directory to PATH.
|
||||
echo.
|
||||
echo.If you don't have Sphinx installed, grab it from
|
||||
echo.https://www.sphinx-doc.org/
|
||||
exit /b 1
|
||||
)
|
||||
|
||||
if "%1" == "" goto help
|
||||
|
||||
%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
|
||||
goto end
|
||||
|
||||
:help
|
||||
%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O%
|
||||
|
||||
:end
|
||||
popd
|
||||
1
doc/source/.gitignore
vendored
Normal file
1
doc/source/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
api-reference/
|
||||
BIN
doc/source/QiboTN.png
Normal file
BIN
doc/source/QiboTN.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 145 KiB |
7
doc/source/_static/css/style.css
Normal file
7
doc/source/_static/css/style.css
Normal file
@@ -0,0 +1,7 @@
|
||||
.wy-side-nav-search {
|
||||
background-color: #6400FF;
|
||||
}
|
||||
|
||||
.wy-nav-top {
|
||||
background-color: #6400FF;
|
||||
}
|
||||
121
doc/source/_static/qibo_logo_dark.svg
Normal file
121
doc/source/_static/qibo_logo_dark.svg
Normal file
@@ -0,0 +1,121 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
xmlns:osb="http://www.openswatchbook.org/uri/2009/osb"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
id="svg8"
|
||||
version="1.1"
|
||||
viewBox="0 0 90 35"
|
||||
height="35mm"
|
||||
width="90mm"
|
||||
sodipodi:docname="qibo_logo.svg"
|
||||
inkscape:export-filename="/home/carrazza/repo/qiboteam/qibo/doc/source/logo.png"
|
||||
inkscape:export-xdpi="261.92001"
|
||||
inkscape:export-ydpi="261.92001"
|
||||
inkscape:version="0.92.3 (2405546, 2018-03-11)">
|
||||
<sodipodi:namedview
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="1869"
|
||||
inkscape:window-height="1025"
|
||||
id="namedview13"
|
||||
showgrid="false"
|
||||
inkscape:zoom="2.8284271"
|
||||
inkscape:cx="153.85323"
|
||||
inkscape:cy="71.280204"
|
||||
inkscape:window-x="51"
|
||||
inkscape:window-y="27"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="flowRoot815" />
|
||||
<defs
|
||||
id="defs2">
|
||||
<linearGradient
|
||||
osb:paint="solid"
|
||||
id="linearGradient2367">
|
||||
<stop
|
||||
id="stop2365"
|
||||
offset="0"
|
||||
style="stop-color:#000000;stop-opacity:1;" />
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<metadata
|
||||
id="metadata5">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
<dc:title></dc:title>
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
transform="translate(0,-262)"
|
||||
id="layer1">
|
||||
<g
|
||||
id="flowRoot815"
|
||||
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
|
||||
transform="matrix(0.90659938,0,0,0.90659938,-216.91173,-176.98871)"
|
||||
aria-label="QIBO">
|
||||
<g
|
||||
id="g1073"
|
||||
inkscape:export-xdpi="261.92001"
|
||||
inkscape:export-ydpi="261.92001"
|
||||
transform="matrix(1.5046043,0,0,1.5046043,-145.46705,-253.28199)">
|
||||
<g
|
||||
id="flowRoot2371"
|
||||
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
|
||||
transform="matrix(0.82282627,0,0,0.82282627,139.12023,451.94249)"
|
||||
aria-label="QIBO">
|
||||
<path
|
||||
sodipodi:nodetypes="ccsssscscccscsccscsssc"
|
||||
inkscape:connector-curvature="0"
|
||||
id="path1057"
|
||||
style="font-style:normal;font-variant:normal;font-weight:bold;font-stretch:normal;font-size:40px;font-family:'URW Gothic L';-inkscape-font-specification:'URW Gothic L, Normal';font-variant-ligatures:normal;font-variant-caps:normal;font-variant-numeric:normal;font-feature-settings:normal;text-align:start;writing-mode:lr-tb;text-anchor:start;fill:#000000"
|
||||
d="m 221.43789,77.595414 c -1.82831,-0.788504 -32.12188,0.399305 -51.6517,-6.124563 2.4,-2.48 3.56,-5.56 3.56,-9.48 0,-8.84 -6.71135,-15.283251 -15.67135,-15.283251 -8.88,0 -15.76,6.68 -15.76,15.28 0,8.52 7.08,15.32 16,15.32 3,0 6.15135,-1.036749 8.23135,-2.476749 2.52,1.72 4.44,2.4 7,2.4 0.16,0 0.4,0 0.72,-0.04 z M 161.47484,71.2676 c -1.24,0.56 -2.08,0.76 -3.56,0.76 -3.72,0 -6.52,-1.36 -8.76,-4.32 1.28,-0.24 1.88,-0.32 2.84,-0.32 3.24,0 6.08,1.16 9.48,3.88 z m 3.92,-2.64 c -3.6,-3.56 -8.64,-5.72 -13.36,-5.72 -1.76,0 -3.2,0.16 -4.56,0.52 -0.08,-0.72 -0.08,-1.04 -0.08,-1.4 0,-5.72 4.4,-10.04 10.2,-10.04 5.84,0 10.2,4.36 10.2,10.24 0,2.68 -0.8,4.76 -2.4,6.4 z" />
|
||||
<path
|
||||
id="path1059"
|
||||
style="font-weight:bold;font-size:28.37454605px;fill:#000000"
|
||||
d="m 176.23806,76.7876 h 3.88732 V 55.81881 h -3.88732 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
id="path1061"
|
||||
style="font-weight:bold;font-size:28.37454605px;fill:#000000"
|
||||
d="m 184.16299,76.7876 h 5.58978 c 2.32671,0 4.02919,-0.36887 5.30604,-1.163357 1.73085,-1.049858 2.72396,-2.922578 2.72396,-5.079044 0,-1.305229 -0.36887,-2.582083 -1.02149,-3.43332 -0.59586,-0.794487 -1.19173,-1.191731 -2.5537,-1.759221 1.67409,-0.879611 2.32671,-1.986219 2.32671,-3.944062 0,-1.957844 -0.87961,-3.603568 -2.38346,-4.568302 -1.24848,-0.794488 -2.55371,-1.021484 -5.67491,-1.021484 h -4.31293 z m 3.88731,-3.717066 v -5.362789 h 1.84434 c 2.69559,0 3.97244,0.879611 3.97244,2.752331 0,1.815971 -1.16336,2.610458 -3.88731,2.610458 z m 0,-9.164978 v -4.284557 h 1.58897 c 2.0146,0 3.00771,0.680989 3.00771,2.128091 0,1.503851 -0.99311,2.156466 -3.3482,2.156466 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
<path
|
||||
id="path1063"
|
||||
style="font-weight:bold;font-size:28.37454605px;fill:#000000"
|
||||
d="m 210.61028,55.449941 c -6.2424,0 -11.06607,4.738549 -11.06607,10.867451 0,6.043778 4.88042,10.839077 11.0377,10.839077 6.10052,0 10.8107,-4.73855 10.8107,-10.867451 0,-5.98703 -4.7953,-10.839077 -10.78233,-10.839077 z m -0.0567,3.74544 c 3.88731,0 6.92339,3.1212 6.92339,7.150386 0,4.029185 -2.95095,7.065262 -6.92339,7.065262 -3.97244,0 -7.09364,-3.1212 -7.09364,-7.093637 0,-4.05756 3.06445,-7.122011 7.09364,-7.122011 z"
|
||||
inkscape:connector-curvature="0" />
|
||||
</g>
|
||||
<rect
|
||||
y="506.51813"
|
||||
x="261.8475"
|
||||
height="5.5406642"
|
||||
width="10.228919"
|
||||
id="rect957"
|
||||
style="opacity:1;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1.10302305;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
|
||||
<rect
|
||||
y="503.69379"
|
||||
x="257.51999"
|
||||
height="3.4096398"
|
||||
width="6.7660036"
|
||||
id="rect959"
|
||||
style="opacity:1;fill:#000000;fill-opacity:1;stroke:none;stroke-width:0.83806002;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1" />
|
||||
</g>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 6.3 KiB |
80
doc/source/_static/qibo_logo_light.svg
Normal file
80
doc/source/_static/qibo_logo_light.svg
Normal file
@@ -0,0 +1,80 @@
|
||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<svg
|
||||
id="svg8"
|
||||
version="1.1"
|
||||
viewBox="0 0 90 35"
|
||||
height="35mm"
|
||||
width="90mm"
|
||||
sodipodi:docname="qibo_logo_light.svg"
|
||||
inkscape:export-filename="/home/carrazza/repo/qiboteam/qibo/doc/source/logo.png"
|
||||
inkscape:export-xdpi="261.92001"
|
||||
inkscape:export-ydpi="261.92001"
|
||||
inkscape:version="1.2.2 (732a01da63, 2022-12-09, custom)"
|
||||
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
|
||||
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
|
||||
xmlns="http://www.w3.org/2000/svg"
|
||||
xmlns:svg="http://www.w3.org/2000/svg"
|
||||
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
|
||||
xmlns:cc="http://creativecommons.org/ns#"
|
||||
xmlns:dc="http://purl.org/dc/elements/1.1/">
|
||||
<sodipodi:namedview
|
||||
pagecolor="#ffffff"
|
||||
bordercolor="#666666"
|
||||
borderopacity="1"
|
||||
objecttolerance="10"
|
||||
gridtolerance="10"
|
||||
guidetolerance="10"
|
||||
inkscape:pageopacity="0"
|
||||
inkscape:pageshadow="2"
|
||||
inkscape:window-width="2497"
|
||||
inkscape:window-height="1376"
|
||||
id="namedview13"
|
||||
showgrid="false"
|
||||
inkscape:zoom="2.8284271"
|
||||
inkscape:cx="153.9725"
|
||||
inkscape:cy="71.594562"
|
||||
inkscape:window-x="63"
|
||||
inkscape:window-y="27"
|
||||
inkscape:window-maximized="1"
|
||||
inkscape:current-layer="flowRoot815"
|
||||
inkscape:showpageshadow="2"
|
||||
inkscape:pagecheckerboard="0"
|
||||
inkscape:deskcolor="#d1d1d1"
|
||||
inkscape:document-units="mm" />
|
||||
<defs
|
||||
id="defs2">
|
||||
<linearGradient
|
||||
id="linearGradient2367"
|
||||
inkscape:swatch="solid">
|
||||
<stop
|
||||
id="stop2365"
|
||||
offset="0"
|
||||
style="stop-color:#000000;stop-opacity:1;" />
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<metadata
|
||||
id="metadata5">
|
||||
<rdf:RDF>
|
||||
<cc:Work
|
||||
rdf:about="">
|
||||
<dc:format>image/svg+xml</dc:format>
|
||||
<dc:type
|
||||
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
|
||||
</cc:Work>
|
||||
</rdf:RDF>
|
||||
</metadata>
|
||||
<g
|
||||
transform="translate(0,-262)"
|
||||
id="layer1">
|
||||
<g
|
||||
id="flowRoot815"
|
||||
style="font-style:normal;font-weight:normal;font-size:40px;line-height:1.25;font-family:sans-serif;letter-spacing:0px;word-spacing:0px;fill:#000000;fill-opacity:1;stroke:none"
|
||||
transform="matrix(0.90659938,0,0,0.90659938,-216.91173,-176.98871)"
|
||||
aria-label="QIBO">
|
||||
<path
|
||||
style="fill:#ffffff;fill-opacity:1;stroke:#ffffff;stroke-width:0;stroke-dasharray:none;stroke-opacity:1"
|
||||
d="m 308.44186,522.48902 c -11.12297,-0.1006 -22.91662,-0.20496 -26.20811,-0.23192 -5.11415,-0.0419 -6.09708,-0.0743 -6.75839,-0.22296 -1.65231,-0.37139 -3.27213,-1.06624 -4.99681,-2.14345 -0.45812,-0.28614 -0.87938,-0.5201 -0.93613,-0.51991 -0.0568,2.1e-4 -0.47432,0.22157 -0.92794,0.49196 -4.10006,2.44398 -9.51715,3.13647 -14.46957,1.84972 -3.78898,-0.98446 -7.45414,-3.21039 -9.88651,-6.00428 -4.44138,-5.10151 -5.79461,-11.88933 -3.65283,-18.32271 1.31216,-3.94138 4.03608,-7.41622 7.60147,-9.69699 2.3024,-1.47284 5.08537,-2.48437 7.98661,-2.90289 1.27434,-0.18384 4.1649,-0.21017 5.45808,-0.0497 6.02561,0.7476 11.05859,3.87282 14.03264,8.71355 1.04539,1.70153 1.9443,4.04651 2.37536,6.19654 0.28447,1.41886 0.39493,4.77822 0.20756,6.3125 -0.41076,3.36356 -1.71376,6.37954 -3.72506,8.62211 -0.295,0.32892 -0.51913,0.61382 -0.49809,0.63313 0.22248,0.20409 7.4225,2.22543 7.92701,2.22543 0.0743,0 0.10783,-3.35341 0.10783,-10.78247 v -10.78247 h 2.37272 2.37272 l 0.0262,11.32242 0.0262,11.32243 0.36114,0.0635 c 0.19862,0.0349 1.22012,0.21062 2.26999,0.39041 1.04987,0.17979 2.01333,0.35021 2.14102,0.37872 l 0.23216,0.0518 v -11.77477 -11.77477 l 4.41101,0.0506 c 3.49847,0.0401 4.58991,0.0854 5.27576,0.2191 2.73116,0.5323 4.64501,2.25928 5.33567,4.81469 0.21115,0.78129 0.24064,2.66411 0.0538,3.43616 -0.28774,1.18904 -0.97993,2.11239 -2.16206,2.88408 l -0.55101,0.35969 1.0259,0.52697 c 1.57277,0.80787 2.43951,1.78326 2.95652,3.32714 0.91135,2.7214 0.32774,5.68806 -1.51151,7.68341 -0.48861,0.53008 -1.07391,0.95757 -1.98798,1.45198 -0.48752,0.26369 -0.56427,0.33543 -0.36113,0.33756 0.14187,10e-4 0.97764,0.0706 1.85726,0.15354 3.45188,0.32558 10.74734,0.78482 14.70338,0.92556 l 1.54772,0.0551 -0.72227,-0.16015 c -5.25686,-1.1656 -9.17072,-4.90745 -10.42813,-9.96981 -0.3265,-1.31451 -0.44765,-3.61658 -0.26,-4.94036 0.55545,-3.91841 2.62402,-7.21586 5.86551,-9.35006 2.26025,-1.48815 4.71811,-2.19764 7.61319,-2.19764 2.8838,0 5.17666,0.69545 7.57836,2.29861 0.98384,0.65673 
2.78882,2.47361 3.45675,3.47956 1.99467,3.00408 2.74074,6.68281 2.06593,10.18681 -0.90486,4.6986 -4.23807,8.54925 -8.67877,10.02603 -1.02357,0.3404 -2.30536,0.62381 -2.90066,0.64137 l -0.39128,0.0115 0.46432,0.0814 c 0.25537,0.0448 2.39123,0.13741 4.74634,0.2059 11.359,0.33032 5.97987,0.35342 -22.44198,0.0964 z m 18.55282,-5.19771 c 1.61549,-0.47543 2.77679,-1.19752 3.93119,-2.44439 1.69733,-1.83327 2.54333,-4.87296 2.08541,-7.49293 -0.29775,-1.70355 -0.77722,-2.8281 -1.75162,-4.1082 -1.03007,-1.35325 -2.57186,-2.41215 -4.29671,-2.951 -0.68783,-0.21489 -0.96884,-0.24122 -2.52794,-0.23685 -1.6409,0.005 -1.81069,0.0237 -2.63114,0.29587 -1.11611,0.37027 -1.85195,0.74206 -2.6376,1.33264 -1.72349,1.29557 -2.88849,3.19575 -3.30029,5.38292 -0.20864,1.10815 -0.15413,3.04635 0.11318,4.02409 0.9121,3.33627 3.48829,5.72444 6.90812,6.40395 1.11107,0.22076 2.97469,0.12725 4.1074,-0.2061 z m -25.61859,-0.2885 c 1.72416,-0.43796 2.49515,-1.34936 2.50532,-2.96158 0.008,-1.29619 -0.30062,-2.01219 -1.14585,-2.65687 -0.82704,-0.6308 -1.55192,-0.78322 -3.96697,-0.83413 l -2.14101,-0.0451 v 3.37683 3.37681 l 2.08942,-0.0556 c 1.26752,-0.0337 2.31352,-0.11254 2.65909,-0.20032 z m -32.04295,-6.07848 c 1.43497,-1.83396 2.10338,-3.70909 2.27525,-6.3829 0.24364,-3.79045 -1.17566,-7.57044 -3.77497,-10.0538 -2.20298,-2.10472 -4.95571,-3.25776 -8.1895,-3.43034 -5.93089,-0.31652 -11.15657,3.21234 -12.78646,8.6346 -0.3865,1.28581 -0.51384,2.16538 -0.52286,3.61136 l -0.008,1.28977 3.97249,0.0626 c 3.03395,0.0478 4.20407,0.1035 4.95271,0.23576 4.6071,0.81394 8.92112,2.87977 12.28063,5.88073 0.53801,0.48059 1.03359,0.87719 1.10128,0.88133 0.0677,0.004 0.38245,-0.32396 0.69946,-0.7291 z m 30.60271,-5.18796 c 0.93298,-0.16645 1.50252,-0.43856 1.92498,-0.91972 0.82225,-0.93649 0.65628,-2.89684 -0.30538,-3.60709 -0.7275,-0.53729 -1.13803,-0.62621 -3.0964,-0.67068 l -1.83147,-0.0416 v 2.69274 2.69275 l 1.26397,-0.004 c 0.69518,-0.002 1.61512,-0.0662 2.0443,-0.14282 z"
|
||||
id="path214" />
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
||||
|
After Width: | Height: | Size: 6.3 KiB |
106
doc/source/conf.py
Normal file
106
doc/source/conf.py
Normal file
@@ -0,0 +1,106 @@
|
||||
# Configuration file for the Sphinx documentation builder.
|
||||
#
|
||||
# For the full list of built-in configuration values, see the documentation:
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html
|
||||
|
||||
# -- Path setup --------------------------------------------------------------
|
||||
|
||||
# If extensions (or modules to document with autodoc) are in another directory,
|
||||
# add these directories to sys.path here. If the directory is relative to the
|
||||
# documentation root, use os.path.abspath to make it absolute, like shown here.
|
||||
#
|
||||
from pathlib import Path
|
||||
|
||||
from sphinx.ext import apidoc
|
||||
|
||||
import qibotn
|
||||
|
||||
# sys.path.insert(0, os.path.abspath(".."))
|
||||
|
||||
|
||||
# -- Project information -----------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
|
||||
|
||||
project = "Qibotn"
|
||||
copyright = "The Qibo team"
|
||||
author = "The Qibo team"
|
||||
|
||||
# The full version, including alpha/beta/rc tags
|
||||
release = qibotn.__version__
|
||||
|
||||
# -- General configuration ---------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
|
||||
|
||||
|
||||
master_doc = "index"
|
||||
|
||||
# Add any Sphinx extension module names here, as strings. They can be
|
||||
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
|
||||
# ones.
|
||||
extensions = [
|
||||
"sphinx.ext.autodoc",
|
||||
"sphinx.ext.doctest",
|
||||
"sphinx.ext.coverage",
|
||||
"sphinx.ext.napoleon",
|
||||
"sphinx.ext.intersphinx",
|
||||
"sphinx_copybutton",
|
||||
"sphinxcontrib.katex",
|
||||
]
|
||||
|
||||
templates_path = ["_templates"]
|
||||
exclude_patterns = []
|
||||
|
||||
|
||||
# -- Options for HTML output -------------------------------------------------
|
||||
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
|
||||
|
||||
html_theme = "furo"
|
||||
html_favicon = "favicon.ico"
|
||||
|
||||
# custom title
|
||||
html_title = "QiboTN · v" + release
|
||||
|
||||
html_static_path = ["_static"]
|
||||
html_show_sourcelink = False
|
||||
|
||||
html_theme_options = {
|
||||
"top_of_page_button": "edit",
|
||||
"source_repository": "https://github.com/qiboteam/qibotn/",
|
||||
"source_branch": "main",
|
||||
"source_directory": "doc/source/",
|
||||
"light_logo": "qibo_logo_dark.svg",
|
||||
"dark_logo": "qibo_logo_light.svg",
|
||||
"light_css_variables": {
|
||||
"color-brand-primary": "#6400FF",
|
||||
"color-brand-secondary": "#6400FF",
|
||||
"color-brand-content": "#6400FF",
|
||||
},
|
||||
"footer_icons": [
|
||||
{
|
||||
"name": "GitHub",
|
||||
"url": "https://github.com/qiboteam/qibotn",
|
||||
"html": """
|
||||
<svg stroke="currentColor" fill="currentColor" stroke-width="0" viewBox="0 0 16 16">
|
||||
<path fill-rule="evenodd" d="M8 0C3.58 0 0 3.58 0 8c0 3.54 2.29 6.53 5.47 7.59.4.07.55-.17.55-.38 0-.19-.01-.82-.01-1.49-2.01.37-2.53-.49-2.69-.94-.09-.23-.48-.94-.82-1.13-.28-.15-.68-.52-.01-.53.63-.01 1.08.58 1.23.82.72 1.21 1.87.87 2.33.66.07-.52.28-.87.51-1.07-1.78-.2-3.64-.89-3.64-3.95 0-.87.31-1.59.82-2.15-.08-.2-.36-1.02.08-2.12 0 0 .67-.21 2.2.82.64-.18 1.32-.27 2-.27.68 0 1.36.09 2 .27 1.53-1.04 2.2-.82 2.2-.82.44 1.1.16 1.92.08 2.12.51.56.82 1.27.82 2.15 0 3.07-1.87 3.75-3.65 3.95.29.25.54.73.54 1.48 0 1.07-.01 1.93-.01 2.2 0 .21.15.46.55.38A8.013 8.013 0 0 0 16 8c0-4.42-3.58-8-8-8z"></path>
|
||||
</svg>
|
||||
""",
|
||||
"class": "",
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
autodoc_mock_imports = ["cupy", "cuquantum"]
|
||||
|
||||
|
||||
def run_apidoc(_):
|
||||
"""Extract autodoc directives from package structure."""
|
||||
source = Path(__file__).parent
|
||||
docs_dest = source / "api-reference"
|
||||
package = source.parents[1] / "src" / "qibotn"
|
||||
apidoc.main(["--no-toc", "--module-first", "-o", str(docs_dest), str(package)])
|
||||
|
||||
|
||||
def setup(app):
|
||||
app.add_css_file("css/style.css")
|
||||
|
||||
app.connect("builder-inited", run_apidoc)
|
||||
BIN
doc/source/favicon.ico
Normal file
BIN
doc/source/favicon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 15 KiB |
12
doc/source/getting-started/index.rst
Normal file
12
doc/source/getting-started/index.rst
Normal file
@@ -0,0 +1,12 @@
|
||||
Getting started
|
||||
===============
|
||||
|
||||
|
||||
In this section we present the basic aspects of the Qibotn design and provide installation instructions.
|
||||
Please visit the following sections to understand how ``qibotn`` works.
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
|
||||
installation
|
||||
quickstart
|
||||
10
doc/source/getting-started/installation.rst
Normal file
10
doc/source/getting-started/installation.rst
Normal file
@@ -0,0 +1,10 @@
|
||||
Installation instructions
|
||||
=========================
|
||||
|
||||
QiboTN can be installed directly from the source repository on Github:
|
||||
|
||||
.. code-block::
|
||||
|
||||
git clone https://github.com/qiboteam/qibotn.git
|
||||
cd qibotn
|
||||
poetry install
|
||||
138
doc/source/getting-started/quickstart.rst
Normal file
138
doc/source/getting-started/quickstart.rst
Normal file
@@ -0,0 +1,138 @@
|
||||
Quick start
|
||||
===========
|
||||
|
||||
In this section, we provide examples on how to use Qibotn to execute tensor network
|
||||
simulation of quantum circuit. First, we show how to use the Cutensornet and Quimb
|
||||
backends, while in a second moment we show a complete example of usage of the Quantum
|
||||
Matcha Tea Backend.
|
||||
|
||||
Setting the backend with Cutensornet and Quimb
|
||||
""""""""""""""""""""""""""""""""""""""""""""""
|
||||
|
||||
Among the backends provided by Qibotn, we have cutensornet (using cuQuantum library)
|
||||
and qutensornet (using Quimb library) for tensor network based simulations.
|
||||
At present, cutensornet backend works only for GPUs whereas qutensornet for CPUs.
|
||||
These backend can be set using the following command line.
|
||||
|
||||
To use cuQuantum library, cutensornet can be specified as follows::
|
||||
|
||||
qibo.set_backend(
|
||||
backend="qibotn", platform="cutensornet", runcard=computation_settings
|
||||
)
|
||||
|
||||
Similarly, to use Quimb library, qutensornet can be set as follows::
|
||||
|
||||
qibo.set_backend(
|
||||
backend="qibotn", platform="qutensornet", runcard=computation_settings
|
||||
)
|
||||
|
||||
Setting the runcard
|
||||
"""""""""""""""""""
|
||||
|
||||
The basic structure of the runcard is as follows::
|
||||
|
||||
computation_settings = {
|
||||
"MPI_enabled": False,
|
||||
"MPS_enabled": False,
|
||||
"NCCL_enabled": False,
|
||||
"expectation_enabled": {
|
||||
"pauli_string_pattern": "IXZ",
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
**MPI_enabled:** Setting this option *True* results in parallel execution of circuit using MPI (Message Passing Interface). At present, only works for cutensornet platform.
|
||||
|
||||
**MPS_enabled:** This option is set *True* for Matrix Product State (MPS) based calculations where as general tensor network structure is used for *False* value.
|
||||
|
||||
**NCCL_enabled:** This is set *True* for cutensoret interface for further acceleration while using Nvidia Collective Communication Library (NCCL).
|
||||
|
||||
**expectation_enabled:** This option is set *True* while calculating expecation value of the circuit. Observable whose expectation value is to be calculated is passed as a string in the dict format as {"pauli_string_pattern": "observable"}. When the option is set *False*, the dense vector state of the circuit is calculated.
|
||||
|
||||
|
||||
Basic example
|
||||
"""""""""""""
|
||||
|
||||
The following is a basic example to execute a two qubit circuit and print the final state in dense vector form using quimb backend::
|
||||
|
||||
import qibo
|
||||
from qibo import Circuit, gates
|
||||
|
||||
# Set the runcard
|
||||
computation_settings = {
|
||||
"MPI_enabled": False,
|
||||
"MPS_enabled": False,
|
||||
"NCCL_enabled": False,
|
||||
"expectation_enabled": False,
|
||||
}
|
||||
|
||||
|
||||
# Set the quimb backend
|
||||
qibo.set_backend(
|
||||
backend="qibotn", platform="qutensornet", runcard=computation_settings
|
||||
)
|
||||
|
||||
|
||||
# Construct the circuit with two qubits
|
||||
c = Circuit(2)
|
||||
|
||||
# Apply Hadamard gates on first and second qubit
|
||||
c.add(gates.H(0))
|
||||
c.add(gates.H(1))
|
||||
|
||||
# Execute the circuit and obtain the final state
|
||||
result = c()
|
||||
|
||||
# Print the final state
|
||||
print(result.state())
|
||||
|
||||
|
||||
Using the Quantum Matcha Tea backend
|
||||
""""""""""""""""""""""""""""""""""""
|
||||
|
||||
In the following we show an example of how the Quantum Matcha Tea backend can be
|
||||
used to execute a quantum circuit::
|
||||
|
||||
# We need Qibo to setup the circuit and the backend
|
||||
from qibo import Circuit, gates
|
||||
from qibo.models.encodings import ghz_state
|
||||
from qibo.backends import construct_backend
|
||||
|
||||
# We need Quantum Matcha Tea to customize the tensor network simulation
|
||||
from qmatchatea import QCConvergenceParameters
|
||||
|
||||
# Set the number of qubits
|
||||
nqubits = 40
|
||||
|
||||
# Construct a circuit preparing a Quantum Fourier Transform
|
||||
circuit = ghz_state(nqubits)
|
||||
|
||||
# Construct the backend
|
||||
backend = construct_backend(backend="qibotn", platform="qmatchatea")
|
||||
|
||||
# Customize the low-level backend preferences according to Qibo's formalism
|
||||
backend.set_device("/CPU:1")
|
||||
backend.set_precision("double")
|
||||
|
||||
# Customize the tensor network simulation itself
|
||||
backend.configure_tn_simulation(
|
||||
ansatz = "MPS",
|
||||
convergence_params = QCConvergenceParameters(max_bond_dimension=50, cut_ratio=1e-6)
|
||||
)
|
||||
|
||||
# Execute the tensor network simulation
|
||||
outcome = backend.execute_circuit(
|
||||
circuit = circuit,
|
||||
nshots=1024,
|
||||
)
|
||||
|
||||
# Print some results
|
||||
print(outcome.probabilities())
|
||||
# Should print something like: {'0000000000000000000000000000000000000000': 0.5000000000000001, '1111111111111111111111111111111111111111': 0.5000000000000001}
|
||||
print(outcome.frequencies())
|
||||
# Should print something like: {'0000000000000000000000000000000000000000': 488, '1111111111111111111111111111111111111111': 536}
|
||||
|
||||
|
||||
By default, the simulator is choosing a specific method to compute the probabilities,
|
||||
and for further information we recommend the user to refer to our High-Level-API
|
||||
docstrings: :doc:`/api-reference/qibotn.backends`.
|
||||
101
doc/source/index.rst
Normal file
101
doc/source/index.rst
Normal file
@@ -0,0 +1,101 @@
|
||||
.. title::
|
||||
QiboTN
|
||||
|
||||
What is QiboTN?
|
||||
===============
|
||||
|
||||
Qibotn is an high-level library which integrates tensor network simulation within
|
||||
the `Qibo <https://github.com/qiboteam/qibo>`_ ecosystem.
|
||||
|
||||
If you are familiar with Qibo, you will be well aware of the modularity we provide
|
||||
through the use of our backends: after building a specific algorithm or quantum
|
||||
circuit, any of our backends can be selected to perform operations on the
|
||||
desired hardware (classical or quantum).
|
||||
|
||||
Here, we extend this modularity to one of the most famous quantum inspired simulation
|
||||
technique.
|
||||
|
||||
We do this by relying on well-known and maintained packages, and integrating their
|
||||
operation into our own dedicated backends.
|
||||
|
||||
.. image:: QiboTN.png
|
||||
|
||||
|
||||
As shown in the figure above, we currently support three different backends, which
|
||||
correspond to the three mentioned packages:
|
||||
- `cuQuantum <https://github.com/NVIDIA/cuQuantum>`_: an NVIDIA SDK of optimized libraries and tools for accelerating quantum computing workflows (we refer to the specific `Cutensornet <https://docs.nvidia.com/cuda/cuquantum/latest/cutensornet/index.html>`_ library);
|
||||
- `quimb <https://quimb.readthedocs.io/en/latest/>`_: an easy but fast python library for ‘quantum information many-body’ calculations, focusing primarily on tensor networks;
|
||||
- `Quantum Matcha Tea <https://www.quantumtea.it/>`_: a logical quantum computer emulator powered by matrix product states. Read `here <https://github.com/qiboteam/qibotn/blob/restore/examples/qmatchatea_intro/qmatchatea_introduction.ipynb>`_ if you want to have an example on how using the Quantum Matcha Tea backend.
|
||||
|
||||
.. warning::
|
||||
|
||||
There are currently two ways to use the three backends (`qmatchatea` is
|
||||
slightly different from the others), but we are working to standardize the interface.
|
||||
|
||||
Thanks to the mentioned packages, we currently support some tensor network ansatze:
|
||||
Matrix Product States (MPS) on any mentioned backend, Tree Tensor Networks (TTN)
|
||||
through the Quantum Matcha Tea backend and a more general Tensor Network (TN) ansatz through
|
||||
Cutensornet and Quimb.
|
||||
|
||||
Supported simulation features
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
|
||||
We support Tensor Network contractions to:
|
||||
|
||||
- dense vectors (all the backends)
|
||||
- expecation values of given Pauli string (Cutensornet and Qmatchatea)
|
||||
|
||||
The supported HPC configurations are:
|
||||
|
||||
- single-node CPU through Quimb and Qmatchatea
|
||||
- single-node GPU or GPUs through Cutensornet and Qmatchatea
|
||||
- multi-node multi-GPU with Message Passing Interface (MPI) through Cutensornet
|
||||
- multi-node multi-GPU with NVIDIA Collective Communications Library (NCCL) through Cutensornet
|
||||
|
||||
|
||||
How to Use the Documentation
|
||||
============================
|
||||
|
||||
Welcome to the comprehensive documentation for QiboTN! This guide will help you navigate through the various sections and make the most of the resources available.
|
||||
|
||||
|
||||
1. **Getting started**: Begin by referring to the
|
||||
:doc:`/getting-started/installation/` guide to set up the ``QiboTN`` library in your environment.
|
||||
|
||||
2. **Tutorials**: Explore the :doc:`getting-started/quickstart/` section for basic usage examples
|
||||
|
||||
|
||||
Contents
|
||||
--------
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 2
|
||||
:caption: Introduction
|
||||
|
||||
getting-started/index
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Main documentation
|
||||
|
||||
api-reference/qibotn
|
||||
Developer guides <https://qibo.science/qibo/stable/developer-guides/index.html>
|
||||
|
||||
.. toctree::
|
||||
:maxdepth: 1
|
||||
:caption: Documentation links
|
||||
|
||||
Qibo docs <https://qibo.science/qibo/stable/>
|
||||
Qibolab docs <https://qibo.science/qibolab/stable/>
|
||||
Qibocal docs <https://qibo.science/qibocal/stable/>
|
||||
Qibosoq docs <https://qibo.science/qibosoq/stable/>
|
||||
Qibochem docs <https://qibo.science/qibochem/stable/>
|
||||
Qibotn docs <https://qibo.science/qibotn/stable/>
|
||||
Qibo-cloud-backends docs <https://qibo.science/qibo-cloud-backends/stable/>
|
||||
|
||||
Indices and tables
|
||||
==================
|
||||
|
||||
* :ref:`genindex`
|
||||
* :ref:`modindex`
|
||||
* :ref:`search`
|
||||
581
examples/qmatchatea_intro/qmatchatea_introduction.ipynb
Normal file
581
examples/qmatchatea_intro/qmatchatea_introduction.ipynb
Normal file
@@ -0,0 +1,581 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "656bb283-ac6d-48d2-a029-3c417c9961f8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction to Quantum Matcha Tea backend in QiboTN\n",
|
||||
"\n",
|
||||
"#### Some imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "6722d94e-e311-48f9-b6df-c6d829bf67fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"import numpy as np\n",
|
||||
"from scipy import stats\n",
|
||||
"\n",
|
||||
"import qibo\n",
|
||||
"from qibo import Circuit, gates, hamiltonians\n",
|
||||
"from qibo.backends import construct_backend"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "a009a5e0-cfd4-4a49-9f7c-e82f252c6147",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Some hyper parameters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "64162116-1555-4a68-811c-01593739d622",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# construct qibotn backend\n",
|
||||
"qmatcha_backend = construct_backend(backend=\"qibotn\", platform=\"qmatchatea\")\n",
|
||||
"\n",
|
||||
"# set number of qubits\n",
|
||||
"nqubits = 4\n",
|
||||
"\n",
|
||||
"# set numpy random seed\n",
|
||||
"np.random.seed(42)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "252f5cd1-5932-4de6-8076-4a357d50ebad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Constructing a parametric quantum circuit"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "4a22a172-f50d-411d-afa3-fa61937c7b3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def build_circuit(nqubits, nlayers):\n",
|
||||
" \"\"\"Construct a parametric quantum circuit.\"\"\"\n",
|
||||
" circ = Circuit(nqubits)\n",
|
||||
" for _ in range(nlayers):\n",
|
||||
" for q in range(nqubits):\n",
|
||||
" circ.add(gates.RY(q=q, theta=0.))\n",
|
||||
" circ.add(gates.RZ(q=q, theta=0.))\n",
|
||||
" [circ.add(gates.CNOT(q%nqubits, (q+1)%nqubits) for q in range(nqubits))]\n",
|
||||
" circ.add(gates.M(*range(nqubits)))\n",
|
||||
" return circ"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "76f23c57-6d08-496b-9a27-52fb63bbfcb1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"0: ─RY─RZ─o─────X─RY─RZ─o─────X─RY─RZ─o─────X─M─\n",
|
||||
"1: ─RY─RZ─X─o───|─RY─RZ─X─o───|─RY─RZ─X─o───|─M─\n",
|
||||
"2: ─RY─RZ───X─o─|─RY─RZ───X─o─|─RY─RZ───X─o─|─M─\n",
|
||||
"3: ─RY─RZ─────X─o─RY─RZ─────X─o─RY─RZ─────X─o─M─\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"circuit = build_circuit(nqubits=nqubits, nlayers=3)\n",
|
||||
"circuit.draw()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "07b2c097-cea2-42ec-8f1d-b4bbb5b71d98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setting random parameters\n",
|
||||
"circuit.set_parameters(\n",
|
||||
" parameters=np.random.uniform(-np.pi, np.pi, len(circuit.get_parameters())),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fd0cea52-03f5-4366-a01a-a5a84aa8ebc7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Setting up the tensor network simulator\n",
|
||||
"\n",
|
||||
"Depending on the simulator, various parameters can be set. One can customize the tensor network execution via the `backend.configure_tn_simulation` function, whose face depends on the specific backend provider."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "2ee03e94-d794-4a51-9e76-01e8d8a259ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Customization of the tensor network simulation in the case of qmatchatea\n",
|
||||
"# Here we use only some of the possible arguments\n",
|
||||
"qmatcha_backend.configure_tn_simulation(\n",
|
||||
" ansatz=\"MPS\",\n",
|
||||
" max_bond_dimension=10,\n",
|
||||
" cut_ratio=1e-6,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "648d85b8-445d-4081-aeed-1691fbae67be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Executing through the backend\n",
|
||||
"\n",
|
||||
"The `backend.execute_circuit` method can be used then. We can simulate results in three ways:\n",
|
||||
"1. reconstruction of the final state (statevector like, only if `nqubits < 20` due to Quantum Matcha Tea setup) only if `return_array` is set to `True`;\n",
|
||||
"2. computation of the relevant probabilities of the final state. There are three way of doing so, but we will see it directly into the docstrings;\n",
|
||||
"3. reconstruction of the relevant state's frequencies (only if `nshots` is not `None`)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "35a244c3-adba-4b8b-b28c-0ab592b0f7cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'nqubits': 4,\n",
|
||||
" 'backend': QMatchaTeaBackend(),\n",
|
||||
" 'measures': None,\n",
|
||||
" 'measured_probabilities': {'U': {'0000': (0.0, 0.08390937969317301),\n",
|
||||
" '0001': (0.08390937969317301, 0.08858639088838134),\n",
|
||||
" '0010': (0.08858639088838131, 0.1832549957082757),\n",
|
||||
" '0011': (0.1832549957082757, 0.25896776804349736),\n",
|
||||
" '0100': (0.2589677680434974, 0.33039716334036867),\n",
|
||||
" '0101': (0.33039716334036867, 0.386620221067355),\n",
|
||||
" '0110': (0.3866202210673549, 0.4380808691410473),\n",
|
||||
" '0111': (0.4380808691410473, 0.47837271988834),\n",
|
||||
" '1000': (0.47837271988834, 0.5916815553716759),\n",
|
||||
" '1001': (0.5916815553716759, 0.5972581739037379),\n",
|
||||
" '1010': (0.5972581739037378, 0.6359857590550054),\n",
|
||||
" '1011': (0.6359857590550054, 0.6894851559808782),\n",
|
||||
" '1100': (0.6894851559808783, 0.7030911408535467),\n",
|
||||
" '1101': (0.7030911408535467, 0.8264027395524797),\n",
|
||||
" '1110': (0.8264027395524797, 0.8981519382820797),\n",
|
||||
" '1111': (0.8981519382820797, 0.9999999999999998)},\n",
|
||||
" 'E': [None],\n",
|
||||
" 'G': [None]},\n",
|
||||
" 'prob_type': 'U',\n",
|
||||
" 'statevector': None}"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Simple execution (defaults)\n",
|
||||
"outcome = qmatcha_backend.execute_circuit(circuit=circuit)\n",
|
||||
"\n",
|
||||
"# Print outcome\n",
|
||||
"vars(outcome)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "60501c3d-2a44-421f-b434-4a508714b132",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'nqubits': 4,\n",
|
||||
" 'backend': QMatchaTeaBackend(),\n",
|
||||
" 'measures': None,\n",
|
||||
" 'measured_probabilities': {'U': [None],\n",
|
||||
" 'E': [None],\n",
|
||||
" 'G': {'1110': 0.07174919872960005,\n",
|
||||
" '1111': 0.10184806171792007,\n",
|
||||
" '0010': 0.09466860481989439,\n",
|
||||
" '0011': 0.07571277233522165}},\n",
|
||||
" 'prob_type': 'G',\n",
|
||||
" 'statevector': array([ 0.08809627-0.27595005j, 0.24859731-0.22695421j,\n",
|
||||
" 0.18807826+0.18988408j, 0.09444097+0.06846085j,\n",
|
||||
" 0.00470148+0.30764671j, 0.17371395-0.09247188j,\n",
|
||||
" -0.18900305+0.12545316j, -0.17359753+0.20399288j,\n",
|
||||
" -0.0517478 +0.04471215j, -0.0411739 -0.06230031j,\n",
|
||||
" 0.22377064+0.07842041j, -0.21784975-0.27541439j,\n",
|
||||
" -0.27208941+0.04098933j, -0.22748127+0.04185292j,\n",
|
||||
" 0.17105258-0.10503745j, -0.01729753-0.31866731j])}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Execution with a specific probability type\n",
|
||||
"# We use here \"E\", which is cutting some of the components if under a threshold\n",
|
||||
"# We also retrieve the statevector\n",
|
||||
"outcome = qmatcha_backend.execute_circuit(\n",
|
||||
" circuit=circuit,\n",
|
||||
" prob_type=\"G\",\n",
|
||||
" prob_threshold=0.3,\n",
|
||||
" return_array=True,\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Print outcome\n",
|
||||
"vars(outcome)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84ec0b48-f6b4-495c-93b8-8e42d1a8b0df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"\n",
|
||||
"One can access to the specific contents of the simulation outcome."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "c0443efc-21ef-4ed5-9cf4-785d204a1881",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Probabilities:\n",
|
||||
" [0.0717492 0.10184806 0.0946686 0.07571277]\n",
|
||||
"\n",
|
||||
"State:\n",
|
||||
" [ 0.08809627-0.27595005j 0.24859731-0.22695421j 0.18807826+0.18988408j\n",
|
||||
" 0.09444097+0.06846085j 0.00470148+0.30764671j 0.17371395-0.09247188j\n",
|
||||
" -0.18900305+0.12545316j -0.17359753+0.20399288j -0.0517478 +0.04471215j\n",
|
||||
" -0.0411739 -0.06230031j 0.22377064+0.07842041j -0.21784975-0.27541439j\n",
|
||||
" -0.27208941+0.04098933j -0.22748127+0.04185292j 0.17105258-0.10503745j\n",
|
||||
" -0.01729753-0.31866731j]\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f\"Probabilities:\\n {outcome.probabilities()}\\n\")\n",
|
||||
"print(f\"State:\\n {outcome.state()}\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9f477388-ca45-409a-a0ce-6603ec294e42",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"\n",
|
||||
"But frequencies cannot be accessed, since no shots have been set."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "8e9413c7-602a-44ed-a50c-1c3dd4dd7494",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"\n",
|
||||
"We can then repeat the execution by setting the number of shots"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "68122cd3-662f-4fd1-bb9c-d33b6f5448dd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'nqubits': 4,\n",
|
||||
" 'backend': QMatchaTeaBackend(),\n",
|
||||
" 'measures': {'0000': 92,\n",
|
||||
" '0001': 7,\n",
|
||||
" '0010': 85,\n",
|
||||
" '0011': 79,\n",
|
||||
" '0100': 81,\n",
|
||||
" '0101': 55,\n",
|
||||
" '0110': 47,\n",
|
||||
" '0111': 39,\n",
|
||||
" '1000': 117,\n",
|
||||
" '1001': 7,\n",
|
||||
" '1010': 38,\n",
|
||||
" '1011': 53,\n",
|
||||
" '1100': 22,\n",
|
||||
" '1101': 129,\n",
|
||||
" '1110': 74,\n",
|
||||
" '1111': 99},\n",
|
||||
" 'measured_probabilities': {'U': [None],\n",
|
||||
" 'E': {'0000': 0.08390937969317301,\n",
|
||||
" '0010': 0.09466860481989439,\n",
|
||||
" '0011': 0.07571277233522165,\n",
|
||||
" '0100': 0.07142939529687124,\n",
|
||||
" '0101': 0.05622305772698632,\n",
|
||||
" '0110': 0.05146064807369245,\n",
|
||||
" '1000': 0.11330883548333581,\n",
|
||||
" '1011': 0.053499396925872765,\n",
|
||||
" '1101': 0.12331159869893296,\n",
|
||||
" '1110': 0.07174919872960005,\n",
|
||||
" '1111': 0.10184806171792007},\n",
|
||||
" 'G': [None]},\n",
|
||||
" 'prob_type': 'E',\n",
|
||||
" 'statevector': array([ 0.08809627-0.27595005j, 0.24859731-0.22695421j,\n",
|
||||
" 0.18807826+0.18988408j, 0.09444097+0.06846085j,\n",
|
||||
" 0.00470148+0.30764671j, 0.17371395-0.09247188j,\n",
|
||||
" -0.18900305+0.12545316j, -0.17359753+0.20399288j,\n",
|
||||
" -0.0517478 +0.04471215j, -0.0411739 -0.06230031j,\n",
|
||||
" 0.22377064+0.07842041j, -0.21784975-0.27541439j,\n",
|
||||
" -0.27208941+0.04098933j, -0.22748127+0.04185292j,\n",
|
||||
" 0.17105258-0.10503745j, -0.01729753-0.31866731j])}"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Execution with a specific probability type\n",
|
||||
"# We use here \"E\", which is cutting some of the components if under a threshold\n",
|
||||
"outcome = qmatcha_backend.execute_circuit(\n",
|
||||
" circuit=circuit,\n",
|
||||
" nshots=1024,\n",
|
||||
" prob_type=\"E\",\n",
|
||||
" prob_threshold=0.05,\n",
|
||||
" return_array=True\n",
|
||||
")\n",
|
||||
"\n",
|
||||
"# Print outcome\n",
|
||||
"vars(outcome)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "ef0e9591-ccca-4cdd-a81b-2bfb3caaf3d0",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Frequencies:\n",
|
||||
" {'0000': 92, '0001': 7, '0010': 85, '0011': 79, '0100': 81, '0101': 55, '0110': 47, '0111': 39, '1000': 117, '1001': 7, '1010': 38, '1011': 53, '1100': 22, '1101': 129, '1110': 74, '1111': 99}\n",
|
||||
"\n",
|
||||
"Probabilities:\n",
|
||||
" [0.08390938 0.0946686 0.07571277 0.0714294 0.05622306 0.05146065\n",
|
||||
" 0.11330884 0.0534994 0.1233116 0.0717492 0.10184806]\n",
|
||||
"\n",
|
||||
"State:\n",
|
||||
" [ 0.08809627-0.27595005j 0.24859731-0.22695421j 0.18807826+0.18988408j\n",
|
||||
" 0.09444097+0.06846085j 0.00470148+0.30764671j 0.17371395-0.09247188j\n",
|
||||
" -0.18900305+0.12545316j -0.17359753+0.20399288j -0.0517478 +0.04471215j\n",
|
||||
" -0.0411739 -0.06230031j 0.22377064+0.07842041j -0.21784975-0.27541439j\n",
|
||||
" -0.27208941+0.04098933j -0.22748127+0.04185292j 0.17105258-0.10503745j\n",
|
||||
" -0.01729753-0.31866731j]\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Frequencies and probabilities\n",
|
||||
"print(f\"Frequencies:\\n {outcome.frequencies()}\\n\")\n",
|
||||
"print(f\"Probabilities:\\n {outcome.probabilities()}\\n\")\n",
|
||||
"print(f\"State:\\n {outcome.state()}\\n\") # Only if return_array = True"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "dd84f1f3-7aa5-4ad1-ae09-81e0aff75b5b",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Compute expectation values\n",
|
||||
"\n",
|
||||
    "Another important feature of this backend is the `expectation` function. In fact, we can compute expectation values of given observables through a Qibo-friendly interface.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Let's start by importing some symbols, thanks to which we can build our observable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "0b46e315-7786-4247-bd2a-83ea1c5842eb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from qibo.symbols import Z, X"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "37385485-e8a3-4ab0-ad44-bcc4e9da24ca",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"0: ─RY─RZ─o─────X─RY─RZ─o─────X─RY─RZ─o─────X─M─\n",
|
||||
"1: ─RY─RZ─X─o───|─RY─RZ─X─o───|─RY─RZ─X─o───|─M─\n",
|
||||
"2: ─RY─RZ───X─o─|─RY─RZ───X─o─|─RY─RZ───X─o─|─M─\n",
|
||||
"3: ─RY─RZ─────X─o─RY─RZ─────X─o─RY─RZ─────X─o─M─\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We are going to compute the expval of an Hamiltonian\n",
|
||||
"# On the state prepared by the following circuit\n",
|
||||
"circuit.draw()\n",
|
||||
"\n",
|
||||
"circuit.set_parameters(\n",
|
||||
" np.random.randn(len(circuit.get_parameters()))\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"id": "ddecc910-7804-4199-8577-a7db38a16db8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Qibo 0.2.15|INFO|2025-02-12 14:36:17]: Using qibojit (numba) backend on /CPU:0\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/latex": [
|
||||
"$\\displaystyle - 1.5 X_{0} Z_{2} + 0.5 Z_{0} Z_{1} + Z_{3}$"
|
||||
],
|
||||
"text/plain": [
|
||||
"-1.5*X0*Z2 + 0.5*Z0*Z1 + Z3"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# We can create a symbolic Hamiltonian\n",
|
||||
"form = 0.5 * Z(0) * Z(1) +- 1.5 * X(0) * Z(2) + Z(3)\n",
|
||||
"hamiltonian = hamiltonians.SymbolicHamiltonian(form)\n",
|
||||
"\n",
|
||||
"# Let's show it\n",
|
||||
"hamiltonian.form"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "163b70a3-814a-4a62-a98a-2ffca933a544",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"0.4355195352502318"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# And compute its expectation value\n",
|
||||
"qmatcha_backend.expectation(\n",
|
||||
" circuit=circuit,\n",
|
||||
" observable=hamiltonian,\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "2d8c4a9c-eca3-49d0-bdbf-ab054172c4e5",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"0.43551953525022985"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# Try with Qibo (which is by default using the Qibojit backend)\n",
|
||||
"hamiltonian = hamiltonians.SymbolicHamiltonian(form)\n",
|
||||
"hamiltonian.expectation(circuit().state())"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "94df291c-9ddc-4b2e-8442-5fca00784bd8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"They match! 🥳"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.10.0"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
572
examples/quimb_intro/quimb_introduction.ipynb
Normal file
572
examples/quimb_intro/quimb_introduction.ipynb
Normal file
@@ -0,0 +1,572 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "656bb283-ac6d-48d2-a029-3c417c9961f8",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"## Introduction to Quimb backend in QiboTN\n",
|
||||
"\n",
|
||||
"#### Some imports"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "6722d94e-e311-48f9-b6df-c6d829bf67fb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import time\n",
|
||||
"import numpy as np\n",
|
||||
"# from scipy import stats\n",
|
||||
"\n",
|
||||
"# import qibo\n",
|
||||
"from qibo import Circuit, gates, hamiltonians\n",
|
||||
"from qibo.backends import construct_backend"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "0c5a8939",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Some hyper parameters"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "64162116-1555-4a68-811c-01593739d622",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# construct qibotn backend\n",
|
||||
"quimb_backend = construct_backend(backend=\"qibotn\", platform=\"quimb\")\n",
|
||||
"\n",
|
||||
"# set number of qubits\n",
|
||||
"nqubits = 4\n",
|
||||
"\n",
|
||||
"# set numpy random seed\n",
|
||||
"np.random.seed(42)\n",
|
||||
"\n",
|
||||
"quimb_backend.setup_backend_specifics(quimb_backend=\"jax\", contractions_optimizer='auto-hq')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "926cfea5",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Quimb accepts different methods for optimizing the way it does contractions, that we pass through \"contractions_optimizer\". \n",
|
||||
"We could also define our own cotengra contraction optimizer! \n",
|
||||
"\n",
|
||||
    "cotengra is a Python library designed for **optimising contraction trees** and performing efficient contractions of large tensor networks.\n",
|
||||
"You can find it here: [https://github.com/jcmgray/cotengra](https://github.com/jcmgray/cotengra)\n",
|
||||
"\n",
|
||||
"For the sake of this tutorial however the default \"auto-hq\" will be fine :) "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "b0a1da82",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import cotengra as ctg\n",
|
||||
"ctg_opt = ctg.ReusableHyperOptimizer(\n",
|
||||
" max_time=10,\n",
|
||||
" minimize='combo',\n",
|
||||
" slicing_opts=None,\n",
|
||||
" parallel=True,\n",
|
||||
" progbar=True\n",
|
||||
")\n",
|
||||
"# quimb_backend.setup_backend_specifics(quimb_backend=\"jax\", contractions_optimizer='ctg_opt')"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "252f5cd1-5932-4de6-8076-4a357d50ebad",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Constructing a parametric quantum circuit"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "4a22a172-f50d-411d-afa3-fa61937c7b3a",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def build_circuit(nqubits, nlayers):\n",
|
||||
" \"\"\"Construct a parametric quantum circuit.\"\"\"\n",
|
||||
" circ = Circuit(nqubits)\n",
|
||||
" for _ in range(nlayers):\n",
|
||||
" for q in range(nqubits):\n",
|
||||
" circ.add(gates.RY(q=q, theta=0.))\n",
|
||||
" circ.add(gates.RZ(q=q, theta=0.))\n",
|
||||
" [circ.add(gates.CNOT(q%nqubits, (q+1)%nqubits) for q in range(nqubits))]\n",
|
||||
" circ.add(gates.M(*range(nqubits)))\n",
|
||||
" return circ"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "76f23c57-6d08-496b-9a27-52fb63bbfcb1",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"0: ─RY─RZ─o─────X─RY─RZ─o─────X─RY─RZ─o─────X─M─\n",
|
||||
"1: ─RY─RZ─X─o───|─RY─RZ─X─o───|─RY─RZ─X─o───|─M─\n",
|
||||
"2: ─RY─RZ───X─o─|─RY─RZ───X─o─|─RY─RZ───X─o─|─M─\n",
|
||||
"3: ─RY─RZ─────X─o─RY─RZ─────X─o─RY─RZ─────X─o─M─\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"circuit = build_circuit(nqubits=nqubits, nlayers=3)\n",
|
||||
"circuit.draw()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "07b2c097-cea2-42ec-8f1d-b4bbb5b71d98",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Setting random parameters\n",
|
||||
"circuit.set_parameters(\n",
|
||||
" parameters=np.random.uniform(-np.pi, np.pi, len(circuit.get_parameters())),\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "fd0cea52-03f5-4366-a01a-a5a84aa8ebc7",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Setting up the tensor network simulator\n",
|
||||
"\n",
|
||||
    "Depending on the simulator, various parameters can be set. One can customize the tensor network execution via the `backend.configure_tn_simulation` function, whose interface depends on the specific backend provider."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "2ee03e94-d794-4a51-9e76-01e8d8a259ba",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Customization of the tensor network simulation in the case of quimb backend\n",
|
||||
"# Here we use only some of the possible arguments\n",
|
||||
"quimb_backend.configure_tn_simulation(\n",
|
||||
" #ansatz=\"MPS\",\n",
|
||||
" max_bond_dimension=10\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "648d85b8-445d-4081-aeed-1691fbae67be",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"#### Executing through the backend\n",
|
||||
"\n",
|
||||
"The `backend.execute_circuit` method can be used then. We can simulate results in three ways:\n",
|
||||
"1. reconstruction of the final state only if `return_array` is set to `True`;\n",
|
||||
"2. computation of the relevant probabilities of the final state.\n",
|
||||
"3. reconstruction of the relevant state's frequencies (only if `nshots` is not `None`)."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "35a244c3-adba-4b8b-b28c-0ab592b0f7cf",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/andrea/python_envs/3.11/lib/python3.11/site-packages/quimb/tensor/circuit.py:215: SyntaxWarning: Unsupported operation ignored: creg\n",
|
||||
" warnings.warn(\n",
|
||||
"/home/andrea/python_envs/3.11/lib/python3.11/site-packages/quimb/tensor/circuit.py:215: SyntaxWarning: Unsupported operation ignored: measure\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"{'nqubits': 4,\n",
|
||||
" 'backend': qibotn (quimb),\n",
|
||||
" 'measures': Counter({'1101': 14,\n",
|
||||
" '1000': 12,\n",
|
||||
" '0010': 11,\n",
|
||||
" '0011': 11,\n",
|
||||
" '0110': 9,\n",
|
||||
" '0000': 8,\n",
|
||||
" '1010': 7,\n",
|
||||
" '1110': 6,\n",
|
||||
" '0100': 5,\n",
|
||||
" '1111': 5,\n",
|
||||
" '1011': 5,\n",
|
||||
" '0101': 4,\n",
|
||||
" '0111': 1,\n",
|
||||
" '0001': 1,\n",
|
||||
" '1100': 1}),\n",
|
||||
" 'measured_probabilities': {'1101': np.float64(0.12331159869893284),\n",
|
||||
" '1000': np.float64(0.11330883548333684),\n",
|
||||
" '0010': np.float64(0.0946686048198943),\n",
|
||||
" '0011': np.float64(0.07571277233522157),\n",
|
||||
" '0110': np.float64(0.051460648073692314),\n",
|
||||
" '0000': np.float64(0.08390937969317334),\n",
|
||||
" '1010': np.float64(0.03872758515126775),\n",
|
||||
" '1110': np.float64(0.07174919872960006),\n",
|
||||
" '0100': np.float64(0.07142939529687146),\n",
|
||||
" '1111': np.float64(0.10184806171791994),\n",
|
||||
" '1011': np.float64(0.053499396925872716),\n",
|
||||
" '0101': np.float64(0.05622305772698606),\n",
|
||||
" '0111': np.float64(0.040291850747292815),\n",
|
||||
" '0001': np.float64(0.004677011195208322),\n",
|
||||
" '1100': np.float64(0.013605984872668443)},\n",
|
||||
" 'prob_type': 'default',\n",
|
||||
" 'statevector': Array([[ 0.08809626-0.27595j ],\n",
|
||||
" [-0.05174781+0.04471214j],\n",
|
||||
" [ 0.00470146+0.30764672j],\n",
|
||||
" [-0.27208942+0.04098931j],\n",
|
||||
" [ 0.18807825+0.1898841j ],\n",
|
||||
" [ 0.22377063+0.07842041j],\n",
|
||||
" [-0.18900302+0.12545316j],\n",
|
||||
" [ 0.17105258-0.10503745j],\n",
|
||||
" [ 0.24859732-0.22695422j],\n",
|
||||
" [-0.04117391-0.0623003j ],\n",
|
||||
" [ 0.17371394-0.09247189j],\n",
|
||||
" [-0.22748126+0.04185291j],\n",
|
||||
" [ 0.09444097+0.06846087j],\n",
|
||||
" [-0.21784975-0.2754144j ],\n",
|
||||
" [-0.17359754+0.20399287j],\n",
|
||||
" [-0.01729751-0.31866732j]], dtype=complex64)}"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# # Simple execution (defaults)\n",
|
||||
"outcome = quimb_backend.execute_circuit(circuit=circuit, nshots=100, return_array=True)\n",
|
||||
"\n",
|
||||
"# # Print outcome\n",
|
||||
"vars(outcome)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "84ec0b48-f6b4-495c-93b8-8e42d1a8b0df",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"---\n",
|
||||
"\n",
|
||||
    "One can access the specific contents of the simulation outcome."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "c0443efc-21ef-4ed5-9cf4-785d204a1881",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Probabilities:\n",
|
||||
" {'1101': np.float64(0.12331159869893284), '1000': np.float64(0.11330883548333684), '0010': np.float64(0.0946686048198943), '0011': np.float64(0.07571277233522157), '0110': np.float64(0.051460648073692314), '0000': np.float64(0.08390937969317334), '1010': np.float64(0.03872758515126775), '1110': np.float64(0.07174919872960006), '0100': np.float64(0.07142939529687146), '1111': np.float64(0.10184806171791994), '1011': np.float64(0.053499396925872716), '0101': np.float64(0.05622305772698606), '0111': np.float64(0.040291850747292815), '0001': np.float64(0.004677011195208322), '1100': np.float64(0.013605984872668443)}\n",
|
||||
"\n",
|
||||
"State:\n",
|
||||
" [[ 0.08809626-0.27595j ]\n",
|
||||
" [-0.05174781+0.04471214j]\n",
|
||||
" [ 0.00470146+0.30764672j]\n",
|
||||
" [-0.27208942+0.04098931j]\n",
|
||||
" [ 0.18807825+0.1898841j ]\n",
|
||||
" [ 0.22377063+0.07842041j]\n",
|
||||
" [-0.18900302+0.12545316j]\n",
|
||||
" [ 0.17105258-0.10503745j]\n",
|
||||
" [ 0.24859732-0.22695422j]\n",
|
||||
" [-0.04117391-0.0623003j ]\n",
|
||||
" [ 0.17371394-0.09247189j]\n",
|
||||
" [-0.22748126+0.04185291j]\n",
|
||||
" [ 0.09444097+0.06846087j]\n",
|
||||
" [-0.21784975-0.2754144j ]\n",
|
||||
" [-0.17359754+0.20399287j]\n",
|
||||
" [-0.01729751-0.31866732j]]\n",
|
||||
"\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(f\"Probabilities:\\n {outcome.probabilities()}\\n\")\n",
|
||||
"print(f\"State:\\n {outcome.state()}\\n\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "9531f9d6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"### Compute expectation values\n",
|
||||
"\n",
|
||||
    "Another important feature of this backend is the `expectation` function. In fact, we can compute expectation values of given observables through a Qibo-friendly interface.\n",
|
||||
"\n",
|
||||
"---\n",
|
||||
"\n",
|
||||
"Let's start by importing some symbols, thanks to which we can build our observable."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "647f2073",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import numpy as np\n",
|
||||
"import jax\n",
|
||||
"from qibo.backends import construct_backend\n",
|
||||
"from qibo import Circuit, gates"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "74c63a41",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# construct qibotn backend\n",
|
||||
"quimb_backend = construct_backend(backend=\"qibotn\", platform=\"quimb\")\n",
|
||||
"\n",
|
||||
"quimb_backend.setup_backend_specifics(\n",
|
||||
" quimb_backend =\"jax\", \n",
|
||||
" contractions_optimizer='auto-hq'\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"quimb_backend.configure_tn_simulation(\n",
|
||||
" max_bond_dimension=10\n",
|
||||
")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "b2a0decb",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"from qibo.symbols import X, Z, Y\n",
|
||||
"from qibo.hamiltonians import XXZ\n",
|
||||
"\n",
|
||||
"# define Hamiltonian\n",
|
||||
"hamiltonian = XXZ(4, dense=False, backend=quimb_backend)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "bd734be8",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# define circuit\n",
|
||||
"def build_circuit(nqubits, nlayers):\n",
|
||||
" circ = Circuit(nqubits)\n",
|
||||
" for layer in range(nlayers):\n",
|
||||
" for q in range(nqubits):\n",
|
||||
" circ.add(gates.RY(q=q, theta=0.))\n",
|
||||
" circ.add(gates.RZ(q=q, theta=0.))\n",
|
||||
" circ.add(gates.RX(q=q, theta=0.))\n",
|
||||
" for q in range(nqubits - 1):\n",
|
||||
" circ.add(gates.CNOT(q, q + 1))\n",
|
||||
" circ.add(gates.SWAP(q, q + 1))\n",
|
||||
" circ.add(gates.M(*range(nqubits)))\n",
|
||||
" return circ\n",
|
||||
"\n",
|
||||
"def build_circuit_problematic(nqubits, nlayers):\n",
|
||||
" circ = Circuit(nqubits)\n",
|
||||
" for _ in range(nlayers):\n",
|
||||
" for q in range(nqubits):\n",
|
||||
" circ.add(gates.RY(q=q, theta=0.))\n",
|
||||
" circ.add(gates.RZ(q=q, theta=0.))\n",
|
||||
" [circ.add(gates.CNOT(q%nqubits, (q+1)%nqubits) for q in range(nqubits))]\n",
|
||||
" circ.add(gates.M(*range(nqubits)))\n",
|
||||
" return circ\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"nqubits = 4\n",
|
||||
"circuit = build_circuit(nqubits=nqubits, nlayers=3)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 20,
|
||||
"id": "fe63ff24",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Expectation value: 2.0\n",
|
||||
"Elapsed time: 0.0268 seconds\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"start = time.time()\n",
|
||||
"expval = hamiltonian.expectation(circuit)\n",
|
||||
"\n",
|
||||
"elapsed = time.time() - start\n",
|
||||
"print(f\"Expectation value: {expval}\")\n",
|
||||
"print(f\"Elapsed time: {elapsed:.4f} seconds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "d976a849",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"Try with Qibo (which is by default using the Qibojit backend)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "fb1436c8",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[Qibo 0.2.21|INFO|2025-10-27 16:24:00]: Using numpy backend on /CPU:0\n",
|
||||
"WARNING:root:Calculation of expectation values starting from the state is deprecated, use the ``expectation_from_state`` method if you really need it, or simply pass the circuit you want to calculate the expectation value from.\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Expectation value: 2.0\n",
|
||||
"Elapsed time: 0.0360 seconds\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"sym_hamiltonian = XXZ(4, dense=False, backend=None)\n",
|
||||
"\n",
|
||||
"# Let's show it\n",
|
||||
"sym_hamiltonian.form\n",
|
||||
"\n",
|
||||
"# Compute expectation value\n",
|
||||
"start = time.time()\n",
|
||||
"result = sym_hamiltonian.expectation(circuit().state())\n",
|
||||
"elapsed = time.time() - start\n",
|
||||
"print(f\"Expectation value: {result}\")\n",
|
||||
"print(f\"Elapsed time: {elapsed:.4f} seconds\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "77bef077",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"They match! 🥳"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "50130ae6",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"We can also compute gradient of expectation function"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "6a3b26e4",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"/home/andrea/python_envs/3.11/lib/python3.11/site-packages/quimb/tensor/circuit.py:4927: UserWarning: Unsupported options for computing local_expectation with an MPS circuit supplied, ignoring: R, None, None, jax, None\n",
|
||||
" warnings.warn(\n",
|
||||
"/home/andrea/python_envs/3.11/lib/python3.11/site-packages/quimb/tensor/circuit.py:4927: UserWarning: Unsupported options for computing local_expectation with an MPS circuit supplied, ignoring: R, None, None, jax, None\n",
|
||||
" warnings.warn(\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[-0.24630009 0.8370421 -0.11103702 -0.12855841 0.41325414 -0.0628037\n",
|
||||
" 0.51638705 0.794163 -0.27972788 -1.0718998 0.02731732 1.0153619\n",
|
||||
" -0.34494495 1.5744264 0.26920277 -0.36333832 0.12331417 0.5196531\n",
|
||||
" 1.1294655 0.29257926 -0.18237355 0.8914014 -0.9471657 0.3492473\n",
|
||||
" -0.3477673 0.24325958 0.04818404 -0.87983793 0.47196424 0.36605012\n",
|
||||
" 1.005 0.65054715 -0.94860053 0.14459445 0.36571163 -0.2550101 ]\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"def f(circuit, hamiltonian, params):\n",
|
||||
" circuit.set_parameters(params)\n",
|
||||
" return hamiltonian.expectation(\n",
|
||||
" circuit=circuit,\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
"parameters = np.random.uniform(-np.pi, np.pi, size=len(circuit.get_parameters()))\n",
|
||||
"print(jax.grad(f, argnums=2)(circuit, hamiltonian, parameters))\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "aeafa5a6-2afa-429c-a101-effa84bac1d2",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3 (ipykernel)",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.11.12"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
323
flake.lock
generated
Normal file
323
flake.lock
generated
Normal file
@@ -0,0 +1,323 @@
|
||||
{
|
||||
"nodes": {
|
||||
"devenv": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat",
|
||||
"nix": "nix",
|
||||
"nixpkgs": "nixpkgs",
|
||||
"pre-commit-hooks": "pre-commit-hooks"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1710144971,
|
||||
"narHash": "sha256-CjTOdoBvT/4AQncTL20SDHyJNgsXZjtGbz62yDIUYnM=",
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"rev": "6c0bad0045f1e1802f769f7890f6a59504825f4d",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "devenv",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1673956053,
|
||||
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-compat_2": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1696426674,
|
||||
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "edolstra",
|
||||
"repo": "flake-compat",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils": {
|
||||
"inputs": {
|
||||
"systems": "systems"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1685518550,
|
||||
"narHash": "sha256-o2d0KcvaXzTrPRIo0kOLV0/QXHhDQ5DTi+OxcjO8xqY=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "a1720a10a6cfe8234c0e93907ffe81be440f4cef",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"flake-utils_2": {
|
||||
"inputs": {
|
||||
"systems": "systems_2"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1701680307,
|
||||
"narHash": "sha256-kAuep2h5ajznlPMD9rnQyffWG8EM/C73lejGofXvdM8=",
|
||||
"owner": "numtide",
|
||||
"repo": "flake-utils",
|
||||
"rev": "4022d587cbbfd70fe950c1e2083a02621806a725",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"id": "flake-utils",
|
||||
"type": "indirect"
|
||||
}
|
||||
},
|
||||
"gitignore": {
|
||||
"inputs": {
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"pre-commit-hooks",
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1660459072,
|
||||
"narHash": "sha256-8DFJjXG8zqoONA1vXtgeKXy68KdJL5UaXR8NtVMUbx8=",
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"rev": "a20de23b925fd8264fd7fad6454652e142fd7f73",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "hercules-ci",
|
||||
"repo": "gitignore.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"lowdown-src": {
|
||||
"flake": false,
|
||||
"locked": {
|
||||
"lastModified": 1633514407,
|
||||
"narHash": "sha256-Dw32tiMjdK9t3ETl5fzGrutQTzh2rufgZV4A/BbxuD4=",
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"rev": "d2c2b44ff6c27b936ec27358a2653caaef8f73b8",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "kristapsdz",
|
||||
"repo": "lowdown",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nix": {
|
||||
"inputs": {
|
||||
"lowdown-src": "lowdown-src",
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-regression": "nixpkgs-regression"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1676545802,
|
||||
"narHash": "sha256-EK4rZ+Hd5hsvXnzSzk2ikhStJnD63odF7SzsQ8CuSPU=",
|
||||
"owner": "domenkozar",
|
||||
"repo": "nix",
|
||||
"rev": "7c91803598ffbcfe4a55c44ac6d49b2cf07a527f",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "domenkozar",
|
||||
"ref": "relaxed-flakes",
|
||||
"repo": "nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs": {
|
||||
"locked": {
|
||||
"lastModified": 1678875422,
|
||||
"narHash": "sha256-T3o6NcQPwXjxJMn2shz86Chch4ljXgZn746c2caGxd8=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "126f49a01de5b7e35a43fd43f891ecf6d3a51459",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixpkgs-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-python": {
|
||||
"inputs": {
|
||||
"flake-compat": "flake-compat_2",
|
||||
"flake-utils": "flake-utils_2",
|
||||
"nixpkgs": [
|
||||
"nixpkgs"
|
||||
]
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1709875392,
|
||||
"narHash": "sha256-ZC/8TNR2q8Q+j4vzaW3B8jLS9ZDvss61brFW4VtWr0A=",
|
||||
"owner": "cachix",
|
||||
"repo": "nixpkgs-python",
|
||||
"rev": "7296d316153575b8db614ff02dac5b7501a92071",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "nixpkgs-python",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-regression": {
|
||||
"locked": {
|
||||
"lastModified": 1643052045,
|
||||
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs-stable": {
|
||||
"locked": {
|
||||
"lastModified": 1685801374,
|
||||
"narHash": "sha256-otaSUoFEMM+LjBI1XL/xGB5ao6IwnZOXc47qhIgJe8U=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "c37ca420157f4abc31e26f436c1145f8951ff373",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-23.05",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"nixpkgs_2": {
|
||||
"locked": {
|
||||
"lastModified": 1710272261,
|
||||
"narHash": "sha256-g0bDwXFmTE7uGDOs9HcJsfLFhH7fOsASbAuOzDC+fhQ=",
|
||||
"owner": "NixOS",
|
||||
"repo": "nixpkgs",
|
||||
"rev": "0ad13a6833440b8e238947e47bea7f11071dc2b2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "NixOS",
|
||||
"ref": "nixos-unstable",
|
||||
"repo": "nixpkgs",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"pre-commit-hooks": {
|
||||
"inputs": {
|
||||
"flake-compat": [
|
||||
"devenv",
|
||||
"flake-compat"
|
||||
],
|
||||
"flake-utils": "flake-utils",
|
||||
"gitignore": "gitignore",
|
||||
"nixpkgs": [
|
||||
"devenv",
|
||||
"nixpkgs"
|
||||
],
|
||||
"nixpkgs-stable": "nixpkgs-stable"
|
||||
},
|
||||
"locked": {
|
||||
"lastModified": 1704725188,
|
||||
"narHash": "sha256-qq8NbkhRZF1vVYQFt1s8Mbgo8knj+83+QlL5LBnYGpI=",
|
||||
"owner": "cachix",
|
||||
"repo": "pre-commit-hooks.nix",
|
||||
"rev": "ea96f0c05924341c551a797aaba8126334c505d2",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "cachix",
|
||||
"repo": "pre-commit-hooks.nix",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"root": {
|
||||
"inputs": {
|
||||
"devenv": "devenv",
|
||||
"nixpkgs": "nixpkgs_2",
|
||||
"nixpkgs-python": "nixpkgs-python",
|
||||
"systems": "systems_3"
|
||||
}
|
||||
},
|
||||
"systems": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_2": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
},
|
||||
"systems_3": {
|
||||
"locked": {
|
||||
"lastModified": 1681028828,
|
||||
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
|
||||
"type": "github"
|
||||
},
|
||||
"original": {
|
||||
"owner": "nix-systems",
|
||||
"repo": "default",
|
||||
"type": "github"
|
||||
}
|
||||
}
|
||||
},
|
||||
"root": "root",
|
||||
"version": 7
|
||||
}
|
||||
61
flake.nix
Normal file
61
flake.nix
Normal file
@@ -0,0 +1,61 @@
|
||||
{
|
||||
inputs = {
|
||||
nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
|
||||
systems.url = "github:nix-systems/default";
|
||||
devenv.url = "github:cachix/devenv";
|
||||
nixpkgs-python = {
|
||||
url = "github:cachix/nixpkgs-python";
|
||||
inputs.nixpkgs.follows = "nixpkgs";
|
||||
};
|
||||
};
|
||||
|
||||
outputs = {
|
||||
self,
|
||||
nixpkgs,
|
||||
devenv,
|
||||
systems,
|
||||
...
|
||||
} @ inputs: let
|
||||
forEachSystem = nixpkgs.lib.genAttrs (import systems);
|
||||
in {
|
||||
# packages = forEachSystem (system: {
|
||||
# default =
|
||||
# nixpkgs.legacyPackages.${system}.poetry2nix.mkPoetryApplication
|
||||
# {
|
||||
# projectDir = self;
|
||||
# preferWheels = true;
|
||||
# };
|
||||
# });
|
||||
|
||||
devShells =
|
||||
forEachSystem
|
||||
(system: let
|
||||
pkgs = nixpkgs.legacyPackages.${system};
|
||||
in {
|
||||
default = devenv.lib.mkShell {
|
||||
inherit inputs pkgs;
|
||||
|
||||
modules = [
|
||||
{
|
||||
packages = with pkgs; [pre-commit poethepoet stdenv.cc.cc.lib];
|
||||
|
||||
languages.python = {
|
||||
enable = true;
|
||||
poetry = {
|
||||
enable = true;
|
||||
install.enable = true;
|
||||
install.groups = ["dev" "analysis" "tests"];
|
||||
};
|
||||
version = "3.11";
|
||||
};
|
||||
}
|
||||
];
|
||||
};
|
||||
});
|
||||
};
|
||||
|
||||
nixConfig = {
|
||||
extra-trusted-public-keys = "devenv.cachix.org-1:w1cLUi8dv3hnoSPGAuibQv+f9TZLr6cv/Hm9XgU50cw=";
|
||||
extra-substituters = "https://devenv.cachix.org";
|
||||
};
|
||||
}
|
||||
3944
poetry.lock
generated
Normal file
3944
poetry.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
84
pyproject.toml
Normal file
84
pyproject.toml
Normal file
@@ -0,0 +1,84 @@
|
||||
[build-system]
|
||||
requires = ["poetry-core"]
|
||||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
[tool.poetry]
|
||||
name = "qibotn"
|
||||
version = "0.0.7"
|
||||
description = "A tensor-network translation module for Qibo"
|
||||
authors = ["The Qibo team"]
|
||||
license = "Apache License 2.0"
|
||||
readme = "README.md"
|
||||
homepage = "https://qibo.science/"
|
||||
repository = "https://github.com/qiboteam/qibotn/"
|
||||
documentation = "https://qibo.science/docs/qibotn/stable"
|
||||
keywords = []
|
||||
classifiers = [
|
||||
"Programming Language :: Python :: 3",
|
||||
"Topic :: Scientific/Engineering :: Physics",
|
||||
]
|
||||
packages = [{ include = "qibotn", from = "src" }]
|
||||
|
||||
[tool.poetry.dependencies]
|
||||
python = ">=3.11,<3.14"
|
||||
qibo = "^0.3.0"
|
||||
qibojit = "^0.1.13"
|
||||
quimb = { version = "^1.10.0", extras = ["tensor"] }
|
||||
cupy-cuda12x = { version = "^13.6.0", optional = true }
|
||||
cuda-toolkit = { extras = ["all"], version = "^12.9.1", optional = true }
|
||||
nvidia-nccl-cu12 = { version = "^2.16.5", optional = true }
|
||||
cuquantum-python-cu12 = { version = "^25.9.1", optional = true }
|
||||
qmatchatea = { version = "^1.4.3", optional = true }
|
||||
qiskit = { version = "^1.4.0", optional = true }
|
||||
qtealeaves = { version = "^1.5.20", optional = true }
|
||||
|
||||
|
||||
[tool.poetry.extras]
|
||||
cuda = ["cupy-cuda12x", "cuda-toolkit", "nvidia-nccl-cu12", "cuquantum-python-cu12", "mpi4py"]
|
||||
qmatchatea = ["qmatchatea"]
|
||||
|
||||
[tool.poetry.group.docs]
|
||||
optional = true
|
||||
|
||||
[tool.poetry.group.docs.dependencies]
|
||||
Sphinx = "^5.3.0"
|
||||
furo = "^2023.3.27"
|
||||
sphinxcontrib-bibtex = "^2.5.0"
|
||||
sphinx-copybutton = "^0.5.2"
|
||||
sphinxcontrib-katex = "^0.9.9"
|
||||
|
||||
[tool.poetry.group.dev.dependencies]
|
||||
ipython = "^8.34.0"
|
||||
|
||||
[tool.poetry.group.tests]
|
||||
optional = true
|
||||
|
||||
[tool.poetry.group.tests.dependencies]
|
||||
pytest = ">=8,<10"
|
||||
pytest-cov = "^4.1.0"
|
||||
pytest-env = "^1.1.3"
|
||||
|
||||
[tool.poetry.group.analysis]
|
||||
optional = true
|
||||
|
||||
[tool.poetry.group.analysis.dependencies]
|
||||
pylint = "^3.0.3"
|
||||
|
||||
[tool.poe.tasks]
|
||||
test = "pytest"
|
||||
lint = "pylint src --errors-only"
|
||||
lint-warnings = "pylint src --exit-zero"
|
||||
docs = "make -C doc html"
|
||||
docs-clean = "make -C doc clean"
|
||||
test-docs = "make -C doc doctest"
|
||||
|
||||
[tool.pylint.main]
|
||||
ignored-modules = ["cupy", "cuquantum", "mpi4py"]
|
||||
|
||||
[tool.pylint.reports]
|
||||
output-format = "colorized"
|
||||
|
||||
[tool.pytest.ini_options]
|
||||
testpaths = ["tests/"]
|
||||
addopts = ["--cov=qibotn", "--cov-report=xml"]
|
||||
env = ["D:NUMBA_DISABLE_JIT=1"]
|
||||
5
src/qibotn/__init__.py
Normal file
5
src/qibotn/__init__.py
Normal file
@@ -0,0 +1,5 @@
|
||||
import importlib.metadata as im

# Re-exported so users can do ``from qibotn import MetaBackend``.
from qibotn.backends import MetaBackend

# Version is read from the installed distribution metadata (pyproject.toml),
# so there is a single source of truth for the package version.
__version__ = im.version(__package__)
|
||||
55
src/qibotn/backends/__init__.py
Normal file
55
src/qibotn/backends/__init__.py
Normal file
@@ -0,0 +1,55 @@
|
||||
from typing import Union
|
||||
|
||||
from qibo.config import raise_error
|
||||
|
||||
from qibotn.backends.abstract import QibotnBackend
|
||||
from qibotn.backends.cutensornet import CuTensorNet # pylint: disable=E0401
|
||||
|
||||
PLATFORMS = ("cutensornet", "quimb", "qmatchatea")


class MetaBackend:
    """Meta-backend class which takes care of loading the qibotn backends."""

    @staticmethod
    def load(platform: str, runcard: dict = None, **kwargs) -> "QibotnBackend":
        """Load the backend for the requested platform.

        Args:
            platform (str): name of the backend to load, one of ``PLATFORMS``
                (``cutensornet``, ``quimb`` or ``qmatchatea``).
            runcard (dict, optional): dictionary containing the simulation settings.
            **kwargs: platform-specific options, e.g. ``quimb_backend`` and
                ``contraction_optimizer`` for the ``quimb`` platform.

        Returns:
            qibo.backends.abstract.Backend: The loaded backend.

        Raises:
            NotImplementedError: if ``platform`` is not one of ``PLATFORMS``.
        """
        if platform == "cutensornet":  # pragma: no cover
            return CuTensorNet(runcard)
        if platform == "quimb":  # pragma: no cover
            import qibotn.backends.quimb as qmb

            quimb_backend = kwargs.get("quimb_backend", "numpy")
            contraction_optimizer = kwargs.get("contraction_optimizer", "auto-hq")
            return qmb.BACKENDS[quimb_backend](
                quimb_backend=quimb_backend, contraction_optimizer=contraction_optimizer
            )
        if platform == "qmatchatea":  # pragma: no cover
            from qibotn.backends.qmatchatea import QMatchaTeaBackend

            return QMatchaTeaBackend()
        raise_error(
            NotImplementedError,
            f"Unsupported platform {platform}, please pick one in {PLATFORMS}",
        )

    def list_available(self) -> dict:
        """Lists all the available qibotn backends.

        Returns:
            dict: mapping of each platform name to a bool telling whether it
            could be loaded in the current environment.
        """
        available_backends = {}
        for platform in PLATFORMS:
            try:
                MetaBackend.load(platform=platform)
                available = True
            # A bare ``except:`` would also swallow KeyboardInterrupt and
            # SystemExit; restrict the availability probe to ordinary
            # failures (ImportError, missing hardware, etc.).
            except Exception:  # pylint: disable=broad-except
                available = False
            available_backends[platform] = available
        return available_backends
|
||||
35
src/qibotn/backends/abstract.py
Normal file
35
src/qibotn/backends/abstract.py
Normal file
@@ -0,0 +1,35 @@
|
||||
from abc import ABC
|
||||
|
||||
from qibo.config import raise_error
|
||||
|
||||
|
||||
class QibotnBackend(ABC):
    """Base class shared by all QiboTN tensor-network backends.

    Gate-by-gate execution is deliberately unavailable (tensor networks
    contract whole circuits at once), and device/precision changes are
    funneled through ``_setup_backend_specifics`` so subclasses can react.
    """

    def __init__(self):
        super().__init__()

    def apply_gate(self, gate, state, nqubits):  # pragma: no cover
        """Unavailable: TN backends do not apply individual gates."""
        raise_error(NotImplementedError, "QiboTN cannot apply gates directly.")

    def apply_gate_density_matrix(self, gate, state, nqubits):  # pragma: no cover
        """Unavailable: TN backends do not apply individual gates."""
        raise_error(NotImplementedError, "QiboTN cannot apply gates directly.")

    def assign_measurements(self, measurement_map, circuit_result):
        """Unavailable: measurement assignment is not provided by QiboTN."""
        raise_error(NotImplementedError, "Not implemented in QiboTN.")

    def set_precision(self, precision):
        """Switch numerical precision, reconfiguring only when it changes."""
        if precision == self.precision:
            return
        super().set_precision(precision)
        self._setup_backend_specifics()

    def set_device(self, device):
        """Select the computation device and reconfigure the backend."""
        self.device = device
        self._setup_backend_specifics()

    def configure_tn_simulation(self, **config):
        """Configure the TN simulation that will be performed."""

    def _setup_backend_specifics(self):
        """Configure the backend specific according to the used package."""
|
||||
169
src/qibotn/backends/cutensornet.py
Normal file
169
src/qibotn/backends/cutensornet.py
Normal file
@@ -0,0 +1,169 @@
|
||||
import numpy as np
|
||||
from qibo import hamiltonians
|
||||
from qibo.backends import NumpyBackend
|
||||
from qibo.config import raise_error
|
||||
|
||||
from qibotn.backends.abstract import QibotnBackend
|
||||
from qibotn.result import TensorNetworkResult
|
||||
|
||||
|
||||
class CuTensorNet(QibotnBackend, NumpyBackend):  # pragma: no cover
    # CI does not test for GPU
    """Creates CuQuantum backend for QiboTN."""

    def __init__(self, runcard=None):
        """Create the backend and record the installed ``cuquantum`` version.

        Args:
            runcard (dict, optional): simulation settings, parsed by
                :meth:`configure_tn_simulation`. ``None`` selects a plain
                dense tensor-network contraction (no MPI/NCCL/MPS).
        """
        super().__init__()
        from cuquantum import __version__  # pylint: disable=import-error

        self.name = "qibotn"
        self.platform = "cutensornet"
        self.versions["cuquantum"] = __version__
        self.supports_multigpu = True
        self.configure_tn_simulation(runcard)

    def configure_tn_simulation(self, runcard):
        """Set the simulation flags (MPI/NCCL/MPS/expectation) from ``runcard``.

        Args:
            runcard (dict or None): optional settings. Recognized keys:
                ``MPI_enabled`` (bool), ``NCCL_enabled`` (bool),
                ``expectation_enabled`` (bool, dict or
                ``qibo.hamiltonians.SymbolicHamiltonian``) and
                ``MPS_enabled`` (bool or dict of gate-algorithm options).
        """
        self.rank = None
        if runcard is None:
            self.MPI_enabled = False
            self.MPS_enabled = False
            self.NCCL_enabled = False
            self.expectation_enabled = False
            return

        self.MPI_enabled = runcard.get("MPI_enabled", False)
        self.NCCL_enabled = runcard.get("NCCL_enabled", False)
        self._parse_expectation_config(runcard)
        self._parse_mps_config(runcard)

    def _parse_expectation_config(self, runcard):
        """Fill ``expectation_enabled`` (and ``observable``) from the runcard."""
        value = runcard.get("expectation_enabled")
        # NOTE(review): a runcard without this key yields ``None`` and hence
        # raises below — preserved from the original implementation; confirm
        # that a missing key is really meant to be an error.
        if value is True:
            self.expectation_enabled = True
            self.observable = None
        elif value is False:
            self.expectation_enabled = False
        elif isinstance(value, (dict, hamiltonians.SymbolicHamiltonian)):
            # Either an observable spec dict or an explicit Hamiltonian.
            self.expectation_enabled = True
            self.observable = value
        else:
            raise TypeError("expectation_enabled has an unexpected type")

    def _parse_mps_config(self, runcard):
        """Fill ``MPS_enabled`` (and ``gate_algo``) from the runcard."""
        value = runcard.get("MPS_enabled")
        if value is True:
            # Default gate-split algorithm: SVD with a small absolute cutoff.
            self.MPS_enabled = True
            self.gate_algo = {
                "qr_method": False,
                "svd_method": {
                    "partition": "UV",
                    "abs_cutoff": 1e-12,
                },
            }
        elif value is False:
            self.MPS_enabled = False
        elif isinstance(value, dict):
            # Caller-supplied gate-algorithm options are used verbatim.
            self.MPS_enabled = True
            self.gate_algo = value
        else:
            raise TypeError("MPS_enabled has an unexpected type")

    def execute_circuit(
        self, circuit, initial_state=None, nshots=None, return_array=False
    ):  # pragma: no cover
        """Executes a quantum circuit using selected TN backend.

        Parameters:
            circuit (:class:`qibo.models.circuit.Circuit`): Circuit to execute.
            initial_state (:class:`qibo.models.circuit.Circuit`): Circuit to prepare the initial state.
                If ``None`` the default ``|00...0>`` state is used.
            nshots (int, optional): unused here; kept for interface compatibility.
            return_array (bool): if ``True``, return the flattened statevector
                array inside the result instead of the backend-native state.

        Returns:
            TensorNetworkResult wrapping the simulated state, or — when
            expectation computation is enabled — the real part of the
            contracted expectation value as a flat array.
        """
        # Local import, renamed so it does not shadow the ``eval`` builtin.
        import qibotn.eval as tn_eval

        if initial_state is not None:
            raise_error(NotImplementedError, "QiboTN cannot support initial state.")

        # Dispatch on the exact combination of feature flags; every
        # combination below mirrors one branch of the original chain.
        mode = (
            self.MPI_enabled,
            self.MPS_enabled,
            self.NCCL_enabled,
            self.expectation_enabled,
        )
        if mode == (False, False, False, False):
            state = tn_eval.dense_vector_tn(circuit, self.dtype)
        elif mode == (False, True, False, False):
            state = tn_eval.dense_vector_mps(circuit, self.gate_algo, self.dtype)
        elif mode == (True, False, False, False):
            state, self.rank = tn_eval.dense_vector_tn_MPI(circuit, self.dtype, 32)
            if self.rank > 0:
                # Only rank 0 holds the result; other ranks return a dummy.
                state = np.array(0)
        elif mode == (False, False, True, False):
            state, self.rank = tn_eval.dense_vector_tn_nccl(circuit, self.dtype, 32)
            if self.rank > 0:
                state = np.array(0)
        elif mode == (False, False, False, True):
            state = tn_eval.expectation_tn(circuit, self.dtype, self.observable)
        elif mode == (True, False, False, True):
            state, self.rank = tn_eval.expectation_tn_MPI(
                circuit, self.dtype, self.observable, 32
            )
            if self.rank > 0:
                state = np.array(0)
        elif mode == (False, False, True, True):
            state, self.rank = tn_eval.expectation_tn_nccl(
                circuit, self.dtype, self.observable, 32
            )
            if self.rank > 0:
                state = np.array(0)
        else:
            raise_error(NotImplementedError, "Compute type not supported.")

        if self.expectation_enabled:
            return state.flatten().real

        statevector = state.flatten() if return_array else state
        return TensorNetworkResult(
            nqubits=circuit.nqubits,
            backend=self,
            measures=None,
            measured_probabilities=None,
            prob_type=None,
            statevector=statevector,
        )
|
||||
317
src/qibotn/backends/qmatchatea.py
Normal file
317
src/qibotn/backends/qmatchatea.py
Normal file
@@ -0,0 +1,317 @@
|
||||
"""Implementation of Quantum Matcha Tea backend."""
|
||||
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
|
||||
import numpy as np
|
||||
import qiskit
|
||||
import qmatchatea
|
||||
import qtealeaves
|
||||
from qibo.backends import NumpyBackend
|
||||
from qibo.config import raise_error
|
||||
|
||||
from qibotn.backends.abstract import QibotnBackend
|
||||
from qibotn.result import TensorNetworkResult
|
||||
|
||||
|
||||
@dataclass
|
||||
class QMatchaTeaBackend(QibotnBackend, NumpyBackend):
|
||||
|
||||
def __init__(self):
|
||||
super().__init__()
|
||||
|
||||
self.name = "qibotn"
|
||||
self.platform = "qmatchatea"
|
||||
|
||||
# Default precision
|
||||
self.precision = "double"
|
||||
|
||||
# Set default configurations
|
||||
self.configure_tn_simulation()
|
||||
self._setup_backend_specifics()
|
||||
|
||||
def configure_tn_simulation(
|
||||
self,
|
||||
ansatz: str = "MPS",
|
||||
max_bond_dimension: int = 10,
|
||||
cut_ratio: float = 1e-9,
|
||||
trunc_tracking_mode: str = "C",
|
||||
svd_control: str = "A",
|
||||
ini_bond_dimension: int = 1,
|
||||
):
|
||||
"""Configure TN simulation given Quantum Matcha Tea interface.
|
||||
|
||||
Args:
|
||||
ansatz (str): tensor network ansatz. It can be tree tensor network "TTN"
|
||||
or Matrix Product States "MPS" (default).
|
||||
max_bond_dimension : int, optional Maximum bond dimension of the problem. Default to 10.
|
||||
cut_ratio : float, optional
|
||||
Cut ratio for singular values. If :math:`\\lambda_n/\\lambda_1 <` cut_ratio then
|
||||
:math:`\\lambda_n` is neglected. Default to 1e-9.
|
||||
trunc_tracking_mode : str, optional
|
||||
Modus for storing truncation, 'M' for maximum, 'C' for
|
||||
cumulated (default).
|
||||
svd_ctrl : character, optional
|
||||
Control for the SVD algorithm. Available:
|
||||
- "A" : automatic. Some heuristic is run to choose the best mode for the algorithm.
|
||||
- "V" : gesvd. Safe but slow method.
|
||||
- "D" : gesdd. Fast iterative method. It might fail. Resort to gesvd if it fails
|
||||
- "E" : eigenvalue decomposition method. Faster on GPU. Available only when
|
||||
contracting the singular value to left or right
|
||||
- "X" : sparse eigenvalue decomposition method. Used when you reach the maximum
|
||||
bond dimension.
|
||||
- "R" : random svd method. Used when you reach the maximum bond dimension.
|
||||
Default to 'A'.
|
||||
ini_bond_dimension: int, optional
|
||||
Initial bond dimension of the simulation. It is used if the initial state is random.
|
||||
Default to 1.
|
||||
"""
|
||||
|
||||
self.convergence_params = qmatchatea.QCConvergenceParameters(
|
||||
max_bond_dimension=max_bond_dimension,
|
||||
cut_ratio=cut_ratio,
|
||||
trunc_tracking_mode=trunc_tracking_mode,
|
||||
svd_ctrl=svd_control,
|
||||
ini_bond_dimension=ini_bond_dimension,
|
||||
)
|
||||
self.ansatz = ansatz
|
||||
|
||||
def _setup_backend_specifics(self):
|
||||
"""Configure qmatchatea QCBackend object."""
|
||||
|
||||
qmatchatea_device = (
|
||||
"cpu" if "CPU" in self.device else "gpu" if "GPU" in self.device else None
|
||||
)
|
||||
qmatchatea_precision = (
|
||||
"C"
|
||||
if self.precision == "single"
|
||||
else "Z" if self.precision == "double" else "A"
|
||||
)
|
||||
|
||||
# TODO: once MPI is available for Python, integrate it here
|
||||
self.qmatchatea_backend = qmatchatea.QCBackend(
|
||||
precision=qmatchatea_precision,
|
||||
device=qmatchatea_device,
|
||||
ansatz=self.ansatz,
|
||||
)
|
||||
|
||||
def execute_circuit(
|
||||
self,
|
||||
circuit,
|
||||
initial_state=None,
|
||||
nshots=None,
|
||||
prob_type=None,
|
||||
return_array=False,
|
||||
**prob_kwargs,
|
||||
):
|
||||
"""Execute a Qibo quantum circuit using tensor network simulation.
|
||||
|
||||
This method returns a ``TensorNetworkResult`` object, which provides:
|
||||
- Reconstruction of the system state (if the system size is < 20).
|
||||
- Frequencies (if the number of shots is specified).
|
||||
- Probabilities computed using various methods.
|
||||
|
||||
The following probability computation methods are available, as implemented
|
||||
in Quantum Matcha Tea:
|
||||
- **"E" (Even):** Probabilities are computed by evenly descending the probability tree,
|
||||
pruning branches (states) with probabilities below a threshold.
|
||||
- **"G" (Greedy):** Probabilities are computed by following the most probable states
|
||||
in descending order until reaching a given coverage (sum of probabilities).
|
||||
- **"U" (Unbiased):** An optimal probability measure that is unbiased and designed
|
||||
for best performance. See https://arxiv.org/abs/2401.10330 for details.
|
||||
|
||||
Args:
|
||||
circuit: A Qibo circuit to execute.
|
||||
initial_state: The initial state of the system (default is the vacuum state
|
||||
for tensor network simulations).
|
||||
nshots: The number of shots for shot-noise simulation (optional).
|
||||
prob_type: The probability computation method. Must be one of:
|
||||
- "E" (Even)
|
||||
- "G" (Greedy)
|
||||
- "U" (Unbiased) [default].
|
||||
prob_kwargs: Additional parameters required for probability computation:
|
||||
- For "U", requires ``num_samples``.
|
||||
- For "E" and "G", requires ``prob_threshold``.
|
||||
|
||||
Returns:
|
||||
TensorNetworkResult: An object with methods to reconstruct the state,
|
||||
compute probabilities, and generate frequencies.
|
||||
"""
|
||||
|
||||
# TODO: verify if the QCIO mechanism of matcha is supported by Fortran only
|
||||
# as written in the docstrings or by Python too (see ``io_info`` argument of
|
||||
# ``qmatchatea.interface.run_simulation`` function)
|
||||
if initial_state is not None:
|
||||
raise_error(
|
||||
NotImplementedError,
|
||||
f"Backend {self} currently does not support initial state.",
|
||||
)
|
||||
|
||||
if prob_type == None:
|
||||
prob_type = "U"
|
||||
prob_kwargs = {"num_samples": 500}
|
||||
|
||||
# TODO: check
|
||||
circuit = self._qibocirc_to_qiskitcirc(circuit)
|
||||
run_qk_params = qmatchatea.preprocessing.qk_transpilation_params(False)
|
||||
|
||||
# Initialize the TNObservable object
|
||||
observables = qtealeaves.observables.TNObservables()
|
||||
|
||||
# Shots
|
||||
if nshots is not None:
|
||||
observables += qtealeaves.observables.TNObsProjective(num_shots=nshots)
|
||||
|
||||
# Probabilities
|
||||
observables += qtealeaves.observables.TNObsProbabilities(
|
||||
prob_type=prob_type,
|
||||
**prob_kwargs,
|
||||
)
|
||||
|
||||
# State
|
||||
observables += qtealeaves.observables.TNState2File(name="temp", formatting="U")
|
||||
|
||||
results = qmatchatea.run_simulation(
|
||||
circ=circuit,
|
||||
convergence_parameters=self.convergence_params,
|
||||
transpilation_parameters=run_qk_params,
|
||||
backend=self.qmatchatea_backend,
|
||||
observables=observables,
|
||||
)
|
||||
|
||||
if circuit.num_qubits < 20 and return_array:
|
||||
statevector = results.statevector
|
||||
else:
|
||||
statevector = None
|
||||
|
||||
return TensorNetworkResult(
|
||||
nqubits=circuit.num_qubits,
|
||||
backend=self,
|
||||
measures=results.measures,
|
||||
measured_probabilities=results.measure_probabilities,
|
||||
prob_type=prob_type,
|
||||
statevector=statevector,
|
||||
)
|
||||
|
||||
def expectation(self, circuit, observable):
|
||||
"""Compute the expectation value of a Qibo-friendly ``observable`` on
|
||||
the Tensor Network constructed from a Qibo ``circuit``.
|
||||
|
||||
This method takes a Qibo-style symbolic Hamiltonian (e.g., `X(0)*Z(1) + 2.0*Y(2)*Z(0)`)
|
||||
as the observable, converts it into a Quantum Matcha Tea (qmatchatea) observable
|
||||
(using `TNObsTensorProduct` and `TNObsWeightedSum`), and computes its expectation
|
||||
value using the provided circuit.
|
||||
|
||||
Args:
|
||||
circuit: A Qibo quantum circuit object on which the expectation value
|
||||
is computed. The circuit should be compatible with the qmatchatea
|
||||
Tensor Network backend.
|
||||
observable: The observable whose expectation value we want to compute.
|
||||
This must be provided in the symbolic Hamiltonian form supported by Qibo
|
||||
(e.g., `X(0)*Y(1)` or `Z(0)*Z(1) + 1.5*Y(2)`).
|
||||
|
||||
Returns:
|
||||
qibotn.TensorNetworkResult class, providing methods to retrieve
|
||||
probabilities, frequencies and state always according to the chosen
|
||||
simulation setup.
|
||||
"""
|
||||
|
||||
# From Qibo to Qiskit
|
||||
circuit = self._qibocirc_to_qiskitcirc(circuit)
|
||||
run_qk_params = qmatchatea.preprocessing.qk_transpilation_params(False)
|
||||
|
||||
operators = qmatchatea.QCOperators()
|
||||
observables = qtealeaves.observables.TNObservables()
|
||||
# Add custom observable
|
||||
observables += self._qiboobs_to_qmatchaobs(hamiltonian=observable)
|
||||
|
||||
results = qmatchatea.run_simulation(
|
||||
circ=circuit,
|
||||
convergence_parameters=self.convergence_params,
|
||||
transpilation_parameters=run_qk_params,
|
||||
backend=self.qmatchatea_backend,
|
||||
observables=observables,
|
||||
operators=operators,
|
||||
)
|
||||
|
||||
return np.real(results.observables["custom_hamiltonian"])
|
||||
|
||||
def _qibocirc_to_qiskitcirc(self, qibo_circuit) -> qiskit.QuantumCircuit:
    """Convert a Qibo Circuit into a Qiskit Circuit.

    The conversion goes through OpenQASM 2.0; the resulting Qiskit circuit
    is then transpiled by qmatchatea to match the linear MPS topology,
    restricted to the allowed basis gates.
    """
    # Round-trip through QASM 2.0: Qibo -> QASM string -> Qiskit.
    qasm_string = qibo_circuit.to_qasm()
    converted = qiskit.QuantumCircuit.from_qasm_str(qasm_string)

    # Adapt the circuit to the linear structure of the MPS, constrained
    # to the supported basis gates.
    return qmatchatea.preprocessing.preprocess(
        converted,
        qk_params=qmatchatea.preprocessing.qk_transpilation_params(),
    )
|
||||
|
||||
def _qiboobs_to_qmatchaobs(self, hamiltonian, observable_name="custom_hamiltonian"):
    """Convert a Qibo SymbolicHamiltonian into a qmatchatea TNObsWeightedSum.

    Each term of the Hamiltonian is expected to expose a numeric
    ``coefficient`` and a list of ``factors`` (strings such as ``"X2"`` or
    ``"Z0"``, i.e. an operator label followed by the qubit index it acts on).

    Args:
        hamiltonian (qibo.SymbolicHamiltonian): The symbolic Hamiltonian
            containing the terms.
        observable_name (str): Name for the resulting TNObsWeightedSum.

    Returns:
        TNObsWeightedSum: An observable suitable for use with qmatchatea.

    Raises:
        ValueError: If a factor string does not match the
            ``<operator><qubit-index>`` format.
    """
    # Splits a factor like "X2" into operator "X" and qubit index 2.
    factor_pattern = re.compile(r"([^\d]+)(\d+)")

    weights = []
    combined_tensor_products = None

    for term_index, term in enumerate(hamiltonian.terms):
        weights.append(term.coefficient)

        names = []
        sites = []
        for factor in term.factors:
            parsed = factor_pattern.match(str(factor))
            if parsed is None:
                raise ValueError(
                    f"Factor '{str(factor)}' does not match the expected format."
                )
            names.append(parsed.group(1))
            sites.append([int(parsed.group(2))])

        # One tensor-product observable per Hamiltonian term.
        single_term = qtealeaves.observables.TNObsTensorProduct(
            name=f"term_{term_index}",
            operators=names,
            sites=sites,
        )
        combined_tensor_products = (
            single_term
            if combined_tensor_products is None
            else combined_tensor_products + single_term
        )

    # All terms folded into a single weighted-sum observable.
    return qtealeaves.observables.TNObsWeightedSum(
        name=observable_name,
        tp_operators=combined_tensor_products,
        coeffs=weights,
        use_itpo=False,
    )
|
||||
387
src/qibotn/backends/quimb.py
Normal file
387
src/qibotn/backends/quimb.py
Normal file
@@ -0,0 +1,387 @@
|
||||
from collections import Counter
|
||||
from typing import Optional
|
||||
|
||||
import quimb as qu
|
||||
import quimb.tensor as qtn
|
||||
from qibo.config import raise_error
|
||||
from qibo.gates.abstract import ParametrizedGate
|
||||
from qibo.models import Circuit
|
||||
|
||||
from qibotn.backends.abstract import QibotnBackend
|
||||
from qibotn.result import TensorNetworkResult
|
||||
|
||||
# Maps Qibo gate names (lowercase) to the gate identifiers accepted by
# quimb's ``Circuit.apply_gate``. The special value "measure" marks Qibo
# measurement gates, which the converter skips (see _qibo_circuit_to_quimb).
GATE_MAP = {
    "h": "H",
    "x": "X",
    "y": "Y",
    "z": "Z",
    "s": "S",
    "t": "T",
    "rx": "RX",
    "ry": "RY",
    "rz": "RZ",
    "u3": "U3",
    "cx": "CX",
    "cnot": "CNOT",
    "cy": "CY",
    "cz": "CZ",
    "iswap": "ISWAP",
    "swap": "SWAP",
    "ccx": "CCX",
    "ccy": "CCY",
    "ccz": "CCZ",
    "toffoli": "TOFFOLI",
    "cswap": "CSWAP",
    "fredkin": "FREDKIN",
    "fsim": "fsim",
    "measure": "measure",
}
|
||||
|
||||
|
||||
def __init__(self, quimb_backend="numpy", contraction_optimizer="auto-hq"):
    """Initialize a quimb-based qibotn backend instance.

    Args:
        quimb_backend (str): array engine used by quimb ("numpy", "jax"
            or "torch"); validated in ``setup_backend_specifics``.
        contraction_optimizer (str): contraction-path optimizer forwarded
            to quimb (default "auto-hq").
    """
    # NOTE(review): super(self.__class__, ...) is fragile under further
    # subclassing (it can recurse); presumably it is used because this
    # function is attached to dynamically generated classes via type(),
    # where the zero-argument super() form is unavailable — confirm.
    super(self.__class__, self).__init__()

    self.name = "qibotn"
    self.platform = "quimb"
    self.backend = quimb_backend

    # TN-simulation settings; populated by configure_tn_simulation() below.
    self.ansatz = None
    self.max_bond_dimension = None
    self.svd_cutoff = None
    self.n_most_frequent_states = None

    self.configure_tn_simulation()
    self.setup_backend_specifics(
        quimb_backend=quimb_backend, contractions_optimizer=contraction_optimizer
    )
|
||||
|
||||
|
||||
def configure_tn_simulation(
    self,
    ansatz: str = "mps",
    max_bond_dimension: Optional[int] = None,
    svd_cutoff: Optional[float] = 1e-10,
    n_most_frequent_states: int = 100,
):
    """
    Configure tensor network simulation.

    Args:
        ansatz : str, optional
            The tensor network ansatz to use. Default is ``"mps"``; any other
            value makes ``circuit_ansatz`` fall back to the generic quimb
            ``Circuit`` class.
        max_bond_dimension : int, optional
            The maximum bond dimension for the MPS ansatz. Default is ``None``.
        svd_cutoff : float, optional
            Singular-value truncation cutoff. Default is ``1e-10``.
        n_most_frequent_states : int, optional
            Number of most frequent sampled states retained when computing
            measured probabilities. Default is 100.

    Notes:
        - The ansatz determines the tensor network structure used for simulation. Currently, only "mps" is supported.
        - The `max_bond_dimension` parameter controls the maximum allowed bond dimension for the MPS ansatz.
    """
    self.ansatz = ansatz
    self.max_bond_dimension = max_bond_dimension
    self.svd_cutoff = svd_cutoff
    self.n_most_frequent_states = n_most_frequent_states
|
||||
|
||||
|
||||
@property
def circuit_ansatz(self):
    """Quimb circuit class matching the configured ansatz ("mps" -> CircuitMPS)."""
    return qtn.CircuitMPS if self.ansatz == "mps" else qtn.Circuit
|
||||
|
||||
|
||||
def setup_backend_specifics(
    self, quimb_backend="numpy", contractions_optimizer="auto-hq"
):
    """Select the array engine and contraction optimizer for quimb.

    Args:
        quimb_backend: str
            The backend to use for the quimb tensor network simulation;
            one of "numpy", "jax" or "torch".
        contractions_optimizer: str, optional
            The contractions_optimizer to use for the quimb tensor network
            simulation.
    """
    # NOTE: picking the engine here does not change the inheritance of the
    # generated backend class (that is decided in _generate_backend).
    if quimb_backend == "numpy":
        import numpy

        engine = numpy
    elif quimb_backend == "jax":
        import jax.numpy as jnp

        engine = jnp
    elif quimb_backend == "torch":
        import torch

        engine = torch
    else:
        raise_error(ValueError, f"Unsupported quimb backend: {quimb_backend}")

    self.engine = engine
    self.backend = quimb_backend
    self.contractions_optimizer = contractions_optimizer
|
||||
|
||||
|
||||
def execute_circuit(
    self,
    circuit: Circuit,
    initial_state=None,
    nshots=None,
    return_array=False,
):
    """
    Execute a quantum circuit using the configured tensor network ansatz.

    Args:
        circuit : Circuit
            The Qibo quantum circuit to be executed.
        initial_state : array-like, optional
            The initial state of the quantum system. Only supported for the
            MPS ansatz (``self.ansatz == "mps"``).
        nshots : int, optional
            Number of shots for sampling. If None (or 0), no sampling is
            performed and no frequencies/probabilities are returned.
        return_array : bool, optional
            If True, contract the network and return the dense statevector.

    Returns:
        TensorNetworkResult with nqubits, backend, measurement frequencies
        (or None), measured probabilities of the most frequent states
        (or None), prob_type="default", and the dense statevector
        (or None when return_array is False).

    Raises:
        ValueError
            If an initial state is provided but the ansatz is not "mps".
    """
    # BUG FIX: the ansatz identifier is stored lowercase ("mps" — see
    # configure_tn_simulation's default and circuit_ansatz's comparison).
    # The previous check compared against "MPS" and therefore never
    # matched, so a custom initial state was always rejected.
    if initial_state is not None and self.ansatz == "mps":
        initial_state = qtn.tensor_1d.MatrixProductState.from_dense(
            initial_state, 2
        )  # 2 is the physical dimension
    elif initial_state is not None:
        raise_error(ValueError, "Initial state not None supported only for MPS ansatz.")

    # Build the quimb circuit from the Qibo circuit via OpenQASM 2.0.
    circ_quimb = self.circuit_ansatz.from_openqasm2_str(
        circuit.to_qasm(), psi0=initial_state
    )

    if nshots:
        # Sample, keep the most frequent states, and compute their exact
        # probabilities from the amplitudes.
        frequencies = Counter(circ_quimb.sample(nshots))
        main_frequencies = {
            state: count
            for state, count in frequencies.most_common(self.n_most_frequent_states)
        }
        computational_states = list(main_frequencies.keys())
        amplitudes = {
            state: circ_quimb.amplitude(state) for state in computational_states
        }
        measured_probabilities = {
            state: abs(amplitude) ** 2 for state, amplitude in amplitudes.items()
        }
    else:
        frequencies = None
        measured_probabilities = None

    statevector = (
        circ_quimb.to_dense(backend=self.backend, optimize=self.contractions_optimizer)
        if return_array
        else None
    )
    return TensorNetworkResult(
        nqubits=circuit.nqubits,
        backend=self,
        measures=frequencies,
        measured_probabilities=measured_probabilities,
        prob_type="default",
        statevector=statevector,
    )
|
||||
|
||||
|
||||
def exp_value_observable_symbolic(
    self, circuit, operators_list, sites_list, coeffs_list, nqubits
):
    """Compute the expectation value of a Pauli-string Hamiltonian on a circuit.

    The Qibo circuit is converted into a quimb tensor-network circuit and
    the Hamiltonian, given as three parallel lists (Pauli strings, site
    tuples, coefficients), is evaluated term by term with quimb's
    ``local_expectation``. Within each term the sites must be distinct,
    e.g. ``operators_list=['xyz']``, ``sites_list=[(1, 2, 3)]``,
    ``coeffs_list=[1]``.

    Parameters
    ----------
    circuit : qibo.models.Circuit
        The quantum circuit to evaluate.
    operators_list : list of str
        Operator strings, one per Hamiltonian term.
    sites_list : list of tuple of int
        Qubits each term acts on; indices must be unique within a term.
    coeffs_list : list of real/complex
        Term coefficients (only the real part is used).
    nqubits : int
        Unused here; kept for interface compatibility.

    Returns
    -------
    float
        The real part of the Hamiltonian expectation value.
    """
    # Reject terms that address the same qubit more than once.
    for term_sites in sites_list:
        if len(set(term_sites)) != len(term_sites):
            raise_error(
                ValueError,
                f"Invalid Hamiltonian term sites {term_sites}: repeated qubit indices are not allowed "
                "within a single term (e.g. (0,0,0) is invalid).",
            )

    quimb_circuit = self._qibo_circuit_to_quimb(
        circuit,
        quimb_circuit_type=self.circuit_ansatz,
        gate_opts={"max_bond": self.max_bond_dimension, "cutoff": self.svd_cutoff},
    )

    total = 0.0
    for pauli_string, term_sites, weight in zip(
        operators_list, sites_list, coeffs_list
    ):
        local_operator = self._string_to_quimb_operator(pauli_string)
        term_value = quimb_circuit.local_expectation(
            local_operator,
            where=term_sites,
            backend=self.backend,
            optimize=self.contractions_optimizer,
            simplify_sequence="R",
        )
        total = total + weight.real * term_value

    return self.real(total)
|
||||
|
||||
|
||||
def _qibo_circuit_to_quimb(
    self, qibo_circ, quimb_circuit_type=qtn.Circuit, **circuit_kwargs
):
    """
    Convert a Qibo Circuit to a Quimb Circuit. Measurement gates are ignored. If are given gates not supported by Quimb, an error is raised.

    Parameters
    ----------
    qibo_circ : qibo.models.circuit.Circuit
        The circuit to convert.
    quimb_circuit_type : type
        The Quimb circuit class to use (Circuit, CircuitMPS, etc).
    circuit_kwargs : dict
        Extra arguments to pass to the Quimb circuit constructor.

    Returns
    -------
    circ : quimb.tensor.circuit.Circuit
        The converted circuit.

    Raises
    ------
    ValueError
        If a gate name has no entry in GATE_MAP.
    """
    nqubits = qibo_circ.nqubits
    circ = quimb_circuit_type(nqubits, **circuit_kwargs)

    for gate in qibo_circ.queue:
        # Translate the Qibo gate name via GATE_MAP; unknown names yield None.
        gate_name = getattr(gate, "name", None)
        quimb_gate_name = GATE_MAP.get(gate_name, None)
        # Measurement gates are skipped: sampling is handled separately.
        if quimb_gate_name == "measure":
            continue
        if quimb_gate_name is None:
            raise_error(ValueError, f"Gate {gate_name} not supported in Quimb backend.")

        # Gate parameters come before qubit indices in quimb's apply_gate;
        # params is empty for non-parametrized gates.
        params = getattr(gate, "parameters", ())
        qubits = getattr(gate, "qubits", ())

        # Only trainable parametrized gates are flagged as parametrized,
        # which keeps their parameters symbolic inside quimb.
        is_parametrized = isinstance(gate, ParametrizedGate) and getattr(
            gate, "trainable", True
        )
        if is_parametrized:
            circ.apply_gate(
                quimb_gate_name, *params, *qubits, parametrized=is_parametrized
            )
        else:
            circ.apply_gate(
                quimb_gate_name,
                *params,
                *qubits,
            )
    return circ
|
||||
|
||||
|
||||
def _string_to_quimb_operator(self, op_str):
    """
    Build a Quimb operator from a Pauli string via '&' (kron) chaining.

    Parameters
    ----------
    op_str : str
        A string like 'xzy', where each character is one of 'x', 'y',
        'z', 'i' (case-insensitive).

    Returns
    -------
    qu_op : quimb.Qarray
        The corresponding Quimb operator.
    """
    lowered = op_str.lower()
    # Seed with the first Pauli, then fold the rest in left-to-right.
    operator = qu.pauli(lowered[0])
    for character in lowered[1:]:
        operator = operator & qu.pauli(character)
    return operator
|
||||
|
||||
|
||||
# Maps quimb engine keys to the name fragment used for the generated
# backend classes (e.g. "numpy" -> QuimbNumpyBackend).
CLASSES_ROOTS = {"numpy": "Numpy", "torch": "PyTorch", "jax": "Jax"}

# Namespace for the dynamically generated backend classes: these
# module-level functions become methods of the classes built by
# _generate_backend via type().
METHODS = {
    "__init__": __init__,
    "configure_tn_simulation": configure_tn_simulation,
    "setup_backend_specifics": setup_backend_specifics,
    "execute_circuit": execute_circuit,
    "exp_value_observable_symbolic": exp_value_observable_symbolic,
    "_qibo_circuit_to_quimb": _qibo_circuit_to_quimb,
    "_string_to_quimb_operator": _string_to_quimb_operator,
    "circuit_ansatz": circuit_ansatz,
}
|
||||
|
||||
|
||||
def _generate_backend(quimb_backend: str = "numpy"):
    """Dynamically build a quimb backend class for the given engine.

    The class inherits from QibotnBackend and the matching qibo/qiboml
    numeric backend, with its methods taken from the module-level METHODS
    mapping. Imports are kept lazy so missing optional dependencies only
    fail for the engine that needs them.
    """
    if quimb_backend == "numpy":
        from qibo.backends import NumpyBackend as engine_base
    elif quimb_backend == "torch":
        from qiboml.backends import PyTorchBackend as engine_base
    elif quimb_backend == "jax":
        from qiboml.backends import JaxBackend as engine_base
    else:
        raise_error(ValueError, f"Unsupported quimb backend: {quimb_backend}")

    bases = (QibotnBackend, engine_base)
    return type(f"Quimb{CLASSES_ROOTS[quimb_backend]}Backend", bases, METHODS)
|
||||
|
||||
|
||||
# Instantiate one backend class per supported engine; engines whose
# dependencies are missing (e.g. qiboml for torch/jax) are skipped, so the
# module imports cleanly with only the numpy stack installed.
BACKENDS = {}
for k, v in CLASSES_ROOTS.items():
    backend_name = f"Quimb{v}Backend"
    try:
        backend = _generate_backend(k)
        BACKENDS[k] = backend
        # Also expose the class as a module attribute, e.g. QuimbNumpyBackend.
        globals()[backend_name] = backend
    except ImportError:
        continue
|
||||
|
||||
|
||||
def __getattr__(name):
    """Resolve module attributes for the generated backends by engine key."""
    backend_cls = BACKENDS.get(name)
    if backend_cls is None:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    return backend_cls
|
||||
246
src/qibotn/circuit_convertor.py
Normal file
246
src/qibotn/circuit_convertor.py
Normal file
@@ -0,0 +1,246 @@
|
||||
import cupy as cp
|
||||
import numpy as np
|
||||
|
||||
# Reference: https://github.com/NVIDIA/cuQuantum/tree/main/python/samples/cutensornet/circuit_converter
|
||||
|
||||
|
||||
class QiboCircuitToEinsum:
    """Convert a circuit to a Tensor Network (TN) representation.

    The circuit is first processed to an intermediate form by grouping each gate matrix
    with its corresponding qubit it is acting on to a list. It is then converted to an
    equivalent TN expression through the class function state_vector_operands()
    following the Einstein summation convention in the interleave format.

    See document for detail of the format: https://docs.nvidia.com/cuda/cuquantum/python/api/generated/cuquantum.contract.html

    The output is to be used by cuQuantum's contract() for computation of the
    state vectors of the circuit.
    """

    def __init__(self, circuit, dtype="complex128"):
        # cupy is the array backend: operand tensors are built on the GPU.
        self.backend = cp
        self.dtype = getattr(self.backend, dtype)
        self.init_basis_map(self.backend, dtype)
        self.init_intermediate_circuit(circuit)
        self.circuit = circuit

    def state_vector_operands(self):
        """Create the operands for dense vector computation in the interleave
        format.

        Returns:
            Operands for the contraction in the interleave format.
        """
        # Start every active qubit in |0>.
        input_bitstring = "0" * len(self.active_qubits)

        input_operands = self._get_bitstring_tensors(input_bitstring)

        (
            mode_labels,
            qubits_frontier,
            next_frontier,
        ) = self._init_mode_labels_from_qubits(self.active_qubits)

        gate_mode_labels, gate_operands = self._parse_gates_to_mode_labels_operands(
            self.gate_tensors, qubits_frontier, next_frontier
        )

        operands = input_operands + gate_operands
        mode_labels += gate_mode_labels

        # Output modes: the final frontier label of each active qubit.
        out_list = []
        for key in qubits_frontier:
            out_list.append(qubits_frontier[key])

        # Interleave operand/mode-label pairs as required by contract(),
        # with the output mode list appended last.
        operand_exp_interleave = [x for y in zip(operands, mode_labels) for x in y]
        operand_exp_interleave.append(out_list)
        return operand_exp_interleave

    def _init_mode_labels_from_qubits(self, qubits):
        """Return (initial mode labels, qubit->frontier-label map, next free label)."""
        n = len(qubits)
        frontier_dict = {q: i for i, q in enumerate(qubits)}
        mode_labels = [[i] for i in range(n)]
        return mode_labels, frontier_dict, n

    def _get_bitstring_tensors(self, bitstring):
        """Map each bit character to its |0>/|1> basis vector tensor."""
        return [self.basis_map[ibit] for ibit in bitstring]

    def _parse_gates_to_mode_labels_operands(
        self, gates, qubits_frontier, next_frontier
    ):
        """Assign einsum mode labels to each gate tensor.

        For every gate, the input modes are the current frontier labels of
        its qubits and fresh labels become the outputs; ``qubits_frontier``
        is updated in place.
        """
        mode_labels = []
        operands = []

        for tensor, gate_qubits in gates:
            operands.append(tensor)
            input_mode_labels = []
            output_mode_labels = []
            for q in gate_qubits:
                input_mode_labels.append(qubits_frontier[q])
                output_mode_labels.append(next_frontier)
                # Advance this qubit's frontier to the gate's output mode.
                qubits_frontier[q] = next_frontier
                next_frontier += 1
            mode_labels.append(output_mode_labels + input_mode_labels)
        return mode_labels, operands

    def op_shape_from_qubits(self, nqubits):
        """Modify tensor to cuQuantum shape.

        Parameters:
            nqubits (int): The number of qubits in quantum circuit.

        Returns:
            (qubit_states,input_output) * nqubits
        """
        return (2, 2) * nqubits

    def init_intermediate_circuit(self, circuit):
        """Initialize the intermediate circuit representation.

        This method initializes the intermediate circuit representation by extracting gate matrices and qubit IDs
        from the given quantum circuit.

        Parameters:
            circuit (object): The quantum circuit object.
        """
        self.gate_tensors = []
        gates_qubits = []

        for gate in circuit.queue:
            gate_qubits = gate.control_qubits + gate.target_qubits
            gates_qubits.extend(gate_qubits)

            # self.gate_tensors is to extract into a list the gate matrix together with the qubit id that it is acting on
            # https://github.com/NVIDIA/cuQuantum/blob/6b6339358f859ea930907b79854b90b2db71ab92/python/cuquantum/cutensornet/_internal/circuit_parser_utils_cirq.py#L32
            required_shape = self.op_shape_from_qubits(len(gate_qubits))
            self.gate_tensors.append(
                (
                    cp.asarray(gate.matrix(), dtype=self.dtype).reshape(required_shape),
                    gate_qubits,
                )
            )

        # self.active_qubits is to identify qubits with at least 1 gate acting on it in the whole circuit.
        self.active_qubits = np.unique(gates_qubits)

    def init_basis_map(self, backend, dtype):
        """Initialize the basis map for the quantum circuit.

        This method initializes a basis map for the quantum circuit, which maps binary
        strings representing qubit states to their corresponding quantum state vectors.

        Parameters:
            backend (object): The backend object providing the array conversion method.
            dtype (object): The data type for the quantum state vectors.
        """
        asarray = backend.asarray
        state_0 = asarray([1, 0], dtype=dtype)
        state_1 = asarray([0, 1], dtype=dtype)

        self.basis_map = {"0": state_0, "1": state_1}

    def init_inverse_circuit(self, circuit):
        """Initialize the inverse circuit representation.

        This method initializes the inverse circuit representation by extracting gate matrices and qubit IDs
        from the given quantum circuit.

        Parameters:
            circuit (object): The quantum circuit object.
        """
        self.gate_tensors_inverse = []
        gates_qubits_inverse = []

        for gate in circuit.queue:
            gate_qubits = gate.control_qubits + gate.target_qubits
            gates_qubits_inverse.extend(gate_qubits)

            # self.gate_tensors is to extract into a list the gate matrix together with the qubit id that it is acting on
            # https://github.com/NVIDIA/cuQuantum/blob/6b6339358f859ea930907b79854b90b2db71ab92/python/cuquantum/cutensornet/_internal/circuit_parser_utils_cirq.py#L32
            required_shape = self.op_shape_from_qubits(len(gate_qubits))
            self.gate_tensors_inverse.append(
                (
                    cp.asarray(gate.matrix()).reshape(required_shape),
                    gate_qubits,
                )
            )

        # self.active_qubits is to identify qubits with at least 1 gate acting on it in the whole circuit.
        self.active_qubits_inverse = np.unique(gates_qubits_inverse)

    def get_pauli_gates(self, pauli_map, dtype="complex128", backend=cp):
        """Populate the gates for all pauli operators.

        Parameters:
            pauli_map: A dictionary mapping qubits to pauli operators.
            dtype: Data type for the tensor operands.
            backend: The package the tensor operands belong to.

        Returns:
            A sequence of pauli gates.
        """
        asarray = backend.asarray
        pauli_i = asarray([[1, 0], [0, 1]], dtype=dtype)
        pauli_x = asarray([[0, 1], [1, 0]], dtype=dtype)
        pauli_y = asarray([[0, -1j], [1j, 0]], dtype=dtype)
        pauli_z = asarray([[1, 0], [0, -1]], dtype=dtype)

        operand_map = {"I": pauli_i, "X": pauli_x, "Y": pauli_y, "Z": pauli_z}
        gates = []
        for qubit, pauli_char in pauli_map.items():
            operand = operand_map.get(pauli_char)
            if operand is None:
                raise ValueError("pauli string character must be one of I/X/Y/Z")
            gates.append((operand, (qubit,)))
        return gates

    def expectation_operands(self, ham_gates):
        """Create the operands for pauli string expectation computation in the
        interleave format.

        Parameters:
            ham_gates: A list of gates derived from Qibo hamiltonian object.

        Returns:
            Operands for the contraction in the interleave format.
        """
        input_bitstring = "0" * self.circuit.nqubits

        input_operands = self._get_bitstring_tensors(input_bitstring)

        (
            mode_labels,
            qubits_frontier,
            next_frontier,
        ) = self._init_mode_labels_from_qubits(range(self.circuit.nqubits))

        gate_mode_labels, gate_operands = self._parse_gates_to_mode_labels_operands(
            self.gate_tensors, qubits_frontier, next_frontier
        )

        operands = input_operands + gate_operands
        mode_labels += gate_mode_labels

        # Build <0|U† ... : the bra side is the inverted circuit.
        self.init_inverse_circuit(self.circuit.invert())

        next_frontier = max(qubits_frontier.values()) + 1

        # Hamiltonian operators sit between ket and bra circuits.
        gates_inverse = ham_gates + self.gate_tensors_inverse

        (
            gate_mode_labels_inverse,
            gate_operands_inverse,
        ) = self._parse_gates_to_mode_labels_operands(
            gates_inverse, qubits_frontier, next_frontier
        )
        mode_labels = (
            mode_labels
            + gate_mode_labels_inverse
            + [[qubits_frontier[ix]] for ix in range(self.circuit.nqubits)]
        )
        # Reuse the |0> input tensors to close the bra side of the network.
        operands = operands + gate_operands_inverse + operands[: self.circuit.nqubits]

        operand_exp_interleave = [x for y in zip(operands, mode_labels) for x in y]

        return operand_exp_interleave
|
||||
47
src/qibotn/circuit_to_mps.py
Normal file
47
src/qibotn/circuit_to_mps.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import cupy as cp
|
||||
import cuquantum.bindings.cutensornet as cutn
|
||||
import numpy as np
|
||||
|
||||
from qibotn.circuit_convertor import QiboCircuitToEinsum
|
||||
from qibotn.mps_utils import apply_gate, initial
|
||||
|
||||
|
||||
class QiboCircuitToMPS:
    """A helper class to convert Qibo circuit to MPS.

    Parameters:
        circ_qibo: The quantum circuit object.
        gate_algo(dict): Dictionary for SVD and QR settings.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        rand_seed(int): Seed for random number generator.
    """

    def __init__(
        self,
        circ_qibo,
        gate_algo,
        dtype="complex128",
        rand_seed=0,
    ):
        # Seed both CPU and GPU RNGs for reproducible truncation behaviour.
        np.random.seed(rand_seed)
        cp.random.seed(rand_seed)

        self.num_qubits = circ_qibo.nqubits
        # cuTensorNet library handle; released in __del__.
        self.handle = cutn.create()
        self.dtype = dtype
        # Start from the product-state MPS |00...0>.
        self.mps_tensors = initial(self.num_qubits, dtype=dtype)
        circuitconvertor = QiboCircuitToEinsum(circ_qibo, dtype=dtype)

        for gate, qubits in circuitconvertor.gate_tensors:
            # mapping from qubits to qubit indices
            # apply the gate in-place
            apply_gate(
                self.mps_tensors,
                gate,
                qubits,
                algorithm=gate_algo,
                options={"handle": self.handle},
            )

    def __del__(self):
        # Free the cuTensorNet handle created in __init__.
        cutn.destroy(self.handle)
|
||||
497
src/qibotn/eval.py
Normal file
497
src/qibotn/eval.py
Normal file
@@ -0,0 +1,497 @@
|
||||
import cupy as cp
|
||||
import cuquantum.bindings.cutensornet as cutn
|
||||
from cupy.cuda import nccl
|
||||
from cupy.cuda.runtime import getDeviceCount
|
||||
from cuquantum.tensornet import Network, contract
|
||||
from mpi4py import MPI
|
||||
from qibo import hamiltonians
|
||||
from qibo.symbols import I, X, Y, Z
|
||||
|
||||
from qibotn.circuit_convertor import QiboCircuitToEinsum
|
||||
from qibotn.circuit_to_mps import QiboCircuitToMPS
|
||||
from qibotn.mps_contraction_helper import MPSContractionHelper
|
||||
|
||||
|
||||
def check_observable(observable, circuit_nqubit):
    """Normalize ``observable`` into a Qibo SymbolicHamiltonian.

    None yields the default observable, a dict is parsed into a
    Hamiltonian, and an existing SymbolicHamiltonian passes through.
    """
    if observable is None:
        return build_observable(circuit_nqubit)
    if isinstance(observable, dict):
        return create_hamiltonian_from_dict(observable, circuit_nqubit)
    if isinstance(observable, hamiltonians.SymbolicHamiltonian):
        # TODO: check if the observable is compatible with the circuit
        return observable
    raise TypeError("Invalid observable type.")
|
||||
|
||||
|
||||
def build_observable(circuit_nqubit):
    """Construct the default target observable: sum_i 0.5 * X_i * Z_{i+1} (cyclic)."""
    form = sum(
        0.5 * X(i % circuit_nqubit) * Z((i + 1) % circuit_nqubit)
        for i in range(circuit_nqubit)
    )
    return hamiltonians.SymbolicHamiltonian(form=form)
|
||||
|
||||
|
||||
def create_hamiltonian_from_dict(data, circuit_nqubit):
    """Create a Qibo SymbolicHamiltonian from a dictionary representation.

    Every term is padded with identity (`I`) symbols so that it explicitly
    acts on all circuit qubits.

    Args:
        data (dict): Dictionary with a "terms" list; each term carries a
            "coefficient" and "operators", a list of (gate, qubit) pairs
            such as [("Z", 0), ("X", 1)].
        circuit_nqubit (int): Total number of qubits in the quantum circuit.

    Returns:
        hamiltonians.SymbolicHamiltonian: The constructed Hamiltonian.

    Raises:
        ValueError: If no terms are present in ``data``.
    """
    pauli_symbols = {"X": X, "Y": Y, "Z": Z}

    term_expressions = []
    for entry in data["terms"]:
        weight = entry["coefficient"]
        # {qubit_index: symbol} for the qubits this term touches.
        symbol_by_qubit = {q: pauli_symbols[g] for g, q in entry["operators"]}

        # Pad with identities so every qubit appears in the product.
        factors = [
            symbol_by_qubit[q](q) if q in symbol_by_qubit else I(q)
            for q in range(circuit_nqubit)
        ]
        product = factors[0]
        for factor in factors[1:]:
            product *= factor

        term_expressions.append(weight * product)

    if not term_expressions:
        raise ValueError("No valid Hamiltonian terms were added.")

    return hamiltonians.SymbolicHamiltonian(sum(term_expressions))
|
||||
|
||||
|
||||
def get_ham_gates(pauli_map, dtype="complex128", backend=cp):
    """Populate the gate operands for a weighted Pauli map.

    Parameters:
        pauli_map: iterable of ``(qubit, pauli_char, coeff)`` triples.
        dtype: Data type for the tensor operands.
        backend: The package the tensor operands belong to.

    Returns:
        A list of ``(matrix, (qubit,))`` pairs, each matrix scaled by its
        coefficient.
    """
    asarray = backend.asarray
    operand_map = {
        "I": asarray([[1, 0], [0, 1]], dtype=dtype),
        "X": asarray([[0, 1], [1, 0]], dtype=dtype),
        "Y": asarray([[0, -1j], [1j, 0]], dtype=dtype),
        "Z": asarray([[1, 0], [0, -1]], dtype=dtype),
    }

    gates = []
    for qubit, pauli_char, coeff in pauli_map:
        matrix = operand_map.get(pauli_char)
        if matrix is None:
            raise ValueError("pauli string character must be one of I/X/Y/Z")
        # Fold the term coefficient directly into the operand matrix.
        gates.append((coeff * matrix, (qubit,)))
    return gates
|
||||
|
||||
|
||||
def extract_gates_and_qubits(hamiltonian):
    """Extract the gates and their corresponding qubits from a Qibo Hamiltonian.

    Parameters:
        hamiltonian (qibo.hamiltonians.SymbolicHamiltonian): A Qibo Hamiltonian object.

    Returns:
        list of lists: one entry per Hamiltonian term, each a list of
        ``(qubit, gate_name, coeff)`` tuples. The term coefficient is attached
        to the first factor of the term; the remaining factors carry 1.0.

    Raises:
        ValueError: If the Hamiltonian is not a ``SymbolicHamiltonian``.
    """
    if not isinstance(hamiltonian, hamiltonians.SymbolicHamiltonian):
        raise ValueError(
            "Unsupported Hamiltonian type. Must be SymbolicHamiltonian or Hamiltonian."
        )

    extracted_terms = []
    for term in hamiltonian.terms:
        pending_coeff = term.coefficient
        factor_entries = []
        for factor in term.factors:
            # Factors stringify as e.g. 'X0' or 'Z1': first char is the gate
            # type, the rest is the qubit index.
            label = str(factor)
            factor_entries.append((int(label[1:]), label[0], pending_coeff))
            # Only the first factor carries the term's coefficient.
            pending_coeff = 1.0
        extracted_terms.append(factor_entries)

    return extracted_terms
|
||||
|
||||
|
||||
def initialize_mpi():
    """Initialize MPI communication and select a GPU for this rank."""
    comm = MPI.COMM_WORLD
    rank, size = comm.Get_rank(), comm.Get_size()
    # Round-robin assignment of ranks to the locally visible GPUs.
    device_id = rank % getDeviceCount()
    cp.cuda.Device(device_id).use()
    return comm, rank, size, device_id
|
||||
|
||||
|
||||
def initialize_nccl(comm_mpi, rank, size):
    """Initialize NCCL communication on top of an existing MPI communicator."""
    # Only rank 0 creates the unique NCCL id; everyone else receives it
    # through an MPI broadcast before the communicator is constructed.
    if rank == 0:
        nccl_id = nccl.get_unique_id()
    else:
        nccl_id = None
    nccl_id = comm_mpi.bcast(nccl_id, root=0)
    return nccl.NcclCommunicator(size, nccl_id, rank)
|
||||
|
||||
|
||||
def get_operands(qibo_circ, datatype, rank, comm):
    """Convert the circuit on the root rank and broadcast the operands."""
    operands = None
    if rank == 0:
        # The conversion is done once on the root and shared with all ranks.
        converter = QiboCircuitToEinsum(qibo_circ, dtype=datatype)
        operands = converter.state_vector_operands()
    return comm.bcast(operands, root=0)
|
||||
|
||||
|
||||
def compute_optimal_path(network, n_samples, size, comm):
    """Sample contraction paths on every rank and broadcast the cheapest one."""
    optimizer_settings = {
        "samples": n_samples,
        # Force slicing so the contraction can be split across processes.
        "slicing": {
            "min_slices": max(32, size),
            "memory_model": cutn.MemoryModel.CUTENSOR,
        },
    }
    _, info = network.contract_path(optimize=optimizer_settings)
    # Find which rank's hyper-optimizer sample produced the lowest cost ...
    opt_cost, sender = comm.allreduce(
        sendobj=(info.opt_cost, comm.Get_rank()), op=MPI.MINLOC
    )
    # ... and share that rank's path info with everyone.
    return comm.bcast(info, sender)
|
||||
|
||||
|
||||
def compute_slices(info, rank, size):
    """Determine the contiguous range of slice indices this rank should contract.

    Slices are split as evenly as possible: the first ``num_slices % size``
    ranks receive one extra slice each.
    """
    total = info.num_slices
    base, remainder = divmod(total, size)
    start = rank * base + min(rank, remainder)
    if rank == size - 1:
        stop = total
    else:
        stop = (rank + 1) * base + min(rank + 1, remainder)
    return range(start, stop)
|
||||
|
||||
|
||||
def reduce_result(result, comm, method="MPI", root=0):
    """Sum partial contraction results across processes onto ``root``.

    Raises:
        TypeError: For a dtype NCCL cannot reduce.
        ValueError: For an unknown ``method``.
    """
    if method == "MPI":
        return comm.reduce(sendobj=result, op=MPI.SUM, root=root)
    if method == "NCCL":
        stream_ptr = cp.cuda.get_current_stream().ptr
        # NCCL has no complex datatypes: reinterpret each complex value as a
        # pair of floats of the matching precision.
        if result.dtype == cp.complex128:
            nccl_type = nccl.NCCL_FLOAT64
            count = result.size * 2  # complex128 has 2 float64 numbers
        elif result.dtype == cp.complex64:
            nccl_type = nccl.NCCL_FLOAT32
            count = result.size * 2  # complex64 has 2 float32 numbers
        else:
            raise TypeError(f"Unsupported dtype for NCCL reduce: {result.dtype}")

        # In-place reduction: the same device pointer is used as the send
        # and receive buffer.
        comm.reduce(
            result.data.ptr,
            result.data.ptr,
            count,
            nccl_type,
            nccl.NCCL_SUM,
            root,
            stream_ptr,
        )
        return result
    raise ValueError(f"Unknown reduce method: {method}")
|
||||
|
||||
|
||||
def dense_vector_tn_MPI(qibo_circ, datatype, n_samples=8):
    """Contract a qibo circuit as a tensor network across MPI ranks.

    The circuit is converted with QiboCircuitToEinsum() on the root rank and
    broadcast to every process. Pathfinding then samples ``n_samples``
    candidate contraction paths, the cheapest one found across all ranks is
    selected, and the sliced contraction is executed in parallel, one slice
    group per rank, with the partial results summed on the root.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        n_samples(int): Number of samples for pathfinding.

    Returns:
        Dense vector of quantum circuit (on the root rank) and this process's rank.
    """
    comm, rank, size, device_id = initialize_mpi()
    operands = get_operands(qibo_circ, datatype, rank, comm)

    network = Network(*operands, options={"device_id": device_id})
    best_info = compute_optimal_path(network, n_samples, size, comm)
    # Re-plan the network with the globally selected path and slicing.
    _, best_info = network.contract_path(
        optimize={"path": best_info.path, "slicing": best_info.slices}
    )

    my_slices = compute_slices(best_info, rank, size)
    partial = network.contract(slices=my_slices)
    return reduce_result(partial, comm, method="MPI"), rank
|
||||
|
||||
|
||||
def dense_vector_tn_nccl(qibo_circ, datatype, n_samples=8):
    """Contract a qibo circuit as a tensor network across ranks using NCCL.

    The circuit is converted with QiboCircuitToEinsum() on the root rank and
    broadcast (via MPI) to every process. Pathfinding then samples
    ``n_samples`` candidate contraction paths, the cheapest one found across
    all ranks is selected, and the sliced contraction is executed in
    parallel, with the partial results summed through NCCL.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        n_samples(int): Number of samples for pathfinding.

    Returns:
        Dense vector of quantum circuit and this process's rank.
    """
    comm_mpi, rank, size, device_id = initialize_mpi()
    comm_nccl = initialize_nccl(comm_mpi, rank, size)

    operands = get_operands(qibo_circ, datatype, rank, comm_mpi)
    network = Network(*operands)

    best_info = compute_optimal_path(network, n_samples, size, comm_mpi)
    # Re-plan the network with the globally selected path and slicing.
    _, best_info = network.contract_path(
        optimize={"path": best_info.path, "slicing": best_info.slices}
    )

    my_slices = compute_slices(best_info, rank, size)
    partial = network.contract(slices=my_slices)
    return reduce_result(partial, comm_nccl, method="NCCL"), rank
|
||||
|
||||
|
||||
def dense_vector_tn(qibo_circ, datatype):
    """Contract a qibo circuit in tensor-network form into a dense vector.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.

    Returns:
        Dense vector of quantum circuit.
    """
    converter = QiboCircuitToEinsum(qibo_circ, dtype=datatype)
    operands = converter.state_vector_operands()
    return contract(*operands)
|
||||
|
||||
|
||||
def expectation_tn_nccl(qibo_circ, datatype, observable, n_samples=8):
    """Convert qibo circuit to tensornet (TN) format and contract to the
    expectation value of the given observable, using multi node and multi
    GPU through NCCL (MPI is still used for object broadcasts).

    The conversion is performed by QiboCircuitToEinsum(). The observable is
    decomposed into Pauli terms; for each term the operands are built on the
    root rank, broadcast, path-found with n_samples hyper-optimizer samples,
    and contracted in slices distributed across ranks.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        observable: The observable whose expectation value is computed
            (validated by check_observable against the circuit width).
        n_samples(int): Number of samples for pathfinding.

    Returns:
        Tuple of (expectation value, this process's MPI rank).
    """

    comm_mpi, rank, size, device_id = initialize_mpi()

    comm_nccl = initialize_nccl(comm_mpi, rank, size)

    observable = check_observable(observable, qibo_circ.nqubits)

    # One entry per Pauli term of the observable.
    ham_gate_map = extract_gates_and_qubits(observable)

    # The converter is built once on the root; operands for each term are
    # derived from it inside the loop and broadcast.
    if rank == 0:
        myconvertor = QiboCircuitToEinsum(qibo_circ, dtype=datatype)

    exp = 0
    for each_ham in ham_gate_map:
        ham_gates = get_ham_gates(each_ham)
        # Perform circuit conversion
        if rank == 0:
            operands = myconvertor.expectation_operands(ham_gates)
        else:
            operands = None

        operands = comm_mpi.bcast(operands, root=0)

        network = Network(*operands)

        # Compute the path on all ranks with 8 samples for hyperoptimization. Force slicing to enable parallel contraction.
        info = compute_optimal_path(network, n_samples, size, comm_mpi)

        # Recompute path with the selected optimal settings
        path, info = network.contract_path(
            optimize={"path": info.path, "slicing": info.slices}
        )

        slices = compute_slices(info, rank, size)

        # Contract the group of slices the process is responsible for.
        result = network.contract(slices=slices)

        # Sum the partial contribution from each process on root.
        result = reduce_result(result, comm_nccl, method="NCCL", root=0)

        # NOTE(review): the NCCL reduce targets root=0, so on non-root ranks
        # `result` still holds only this rank's partial sum; `exp` is only
        # the full expectation value on rank 0 — callers appear expected to
        # use the returned rank to filter (confirm against call sites).
        exp += result

    return exp, rank
|
||||
|
||||
|
||||
def expectation_tn_MPI(qibo_circ, datatype, observable, n_samples=8):
    """Convert qibo circuit to tensornet (TN) format and contract to the
    expectation value of the given observable, using multi node and multi
    GPU through MPI.

    The conversion is performed by QiboCircuitToEinsum(). The observable is
    decomposed into Pauli terms; for each term the operands are built on the
    root rank, broadcast, path-found with n_samples hyper-optimizer samples,
    and contracted in slices distributed across ranks, with the partial
    results reduced onto the root.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        observable: The observable whose expectation value is computed
            (validated by check_observable against the circuit width).
        n_samples(int): Number of samples for pathfinding.

    Returns:
        Tuple of (expectation value on rank 0, this process's MPI rank).
        On non-root ranks `exp` stays 0 because the reduced result is only
        accumulated on rank 0.
    """
    # Initialize MPI and device
    comm, rank, size, device_id = initialize_mpi()

    observable = check_observable(observable, qibo_circ.nqubits)

    # One entry per Pauli term of the observable.
    ham_gate_map = extract_gates_and_qubits(observable)

    # The converter is built once on the root; operands for each term are
    # derived from it inside the loop and broadcast.
    if rank == 0:
        myconvertor = QiboCircuitToEinsum(qibo_circ, dtype=datatype)
    exp = 0
    for each_ham in ham_gate_map:
        ham_gates = get_ham_gates(each_ham)
        # Perform circuit conversion
        if rank == 0:
            operands = myconvertor.expectation_operands(ham_gates)
        else:
            operands = None

        operands = comm.bcast(operands, root=0)

        # Create network object.
        network = Network(*operands, options={"device_id": device_id})

        # Compute optimal contraction path
        info = compute_optimal_path(network, n_samples, size, comm)

        # Set path and slices.
        path, info = network.contract_path(
            optimize={"path": info.path, "slicing": info.slices}
        )

        # Compute slice range for each rank
        slices = compute_slices(info, rank, size)

        # Perform contraction
        result = network.contract(slices=slices)

        # Sum the partial contribution from each process on root.
        # (comm.reduce returns None on non-root ranks, hence the guard below.)
        result = reduce_result(result, comm, method="MPI", root=0)

        if rank == 0:
            exp += result

    return exp, rank
|
||||
|
||||
|
||||
def expectation_tn(qibo_circ, datatype, observable):
    """Contract a qibo circuit in tensor-network form to the expectation
    value of the given observable.

    Parameters:
        qibo_circ: The quantum circuit object.
        datatype (str): Either single ("complex64") or double (complex128) precision.
        observable: The observable whose expectation value is computed
            (validated by check_observable against the circuit width).

    Returns:
        Expectation value of the observable on the circuit's output state.
    """
    converter = QiboCircuitToEinsum(qibo_circ, dtype=datatype)

    observable = check_observable(observable, qibo_circ.nqubits)

    # Sum the contribution of each Pauli term of the observable.
    total = 0
    for pauli_term in extract_gates_and_qubits(observable):
        term_gates = get_ham_gates(pauli_term)
        total += contract(*converter.expectation_operands(term_gates))
    return total
|
||||
|
||||
|
||||
def dense_vector_mps(qibo_circ, gate_algo, datatype):
    """Simulate a qibo circuit as a matrix product state (MPS) and contract
    it into a dense vector.

    Parameters:
        qibo_circ: The quantum circuit object.
        gate_algo(dict): Dictionary for SVD and QR settings.
        datatype (str): Either single ("complex64") or double (complex128) precision.

    Returns:
        Dense vector of quantum circuit.
    """
    converter = QiboCircuitToMPS(qibo_circ, gate_algo, dtype=datatype)
    helper = MPSContractionHelper(converter.num_qubits)
    options = {"handle": converter.handle}
    return helper.contract_state_vector(converter.mps_tensors, options)
|
||||
46
src/qibotn/eval_qu.py
Normal file
46
src/qibotn/eval_qu.py
Normal file
@@ -0,0 +1,46 @@
|
||||
import numpy as np
|
||||
import quimb.tensor as qtn
|
||||
|
||||
|
||||
def init_state_tn(nqubits, init_state_sv):
    """Create a matrix product state directly from a dense vector.

    Args:
        nqubits (int): Total number of qubits in the circuit.
        init_state_sv (list): Initial state in the dense vector form.

    Returns:
        list: Matrix product state representation of the dense vector.
    """
    # Every site is a qubit, i.e. a physical dimension of 2.
    physical_dims = tuple(np.full(nqubits, 2, dtype=int))
    return qtn.tensor_1d.MatrixProductState.from_dense(init_state_sv, physical_dims)
|
||||
|
||||
|
||||
def dense_vector_tn_qu(qasm: str, initial_state, mps_opts, backend="numpy"):
    """Evaluate a circuit given in QASM format with quimb.

    Args:
        qasm (str): QASM program.
        initial_state (list): Initial state in the dense vector form. If ``None`` the default ``|00...0>`` state is used.
        mps_opts (dict): Parameters to tune the gate_opts for mps settings in ``class quimb.tensor.circuit.CircuitMPS``.
        backend (str): Backend to perform the contraction with, e.g. ``numpy``, ``cupy``, ``jax``. Passed to ``opt_einsum``.

    Returns:
        list: Amplitudes of final state after the simulation of the circuit.
    """
    # A dense initial state must first be converted into MPS form.
    if initial_state is not None:
        nqubits = int(np.log2(len(initial_state)))
        initial_state = init_state_tn(nqubits, initial_state)

    # Use the MPS-based simulator only when MPS options were supplied.
    if mps_opts:
        circuit_cls = qtn.circuit.CircuitMPS
    else:
        circuit_cls = qtn.circuit.Circuit
    circ_quimb = circuit_cls.from_openqasm2_str(
        qasm, psi0=initial_state, gate_opts=mps_opts
    )

    simplified = circ_quimb.psi.full_simplify(seq="DRC")
    return simplified.to_dense(backend=backend)
|
||||
118
src/qibotn/mps_contraction_helper.py
Normal file
118
src/qibotn/mps_contraction_helper.py
Normal file
@@ -0,0 +1,118 @@
|
||||
from cuquantum.tensornet import contract, contract_path
|
||||
|
||||
# Reference: https://github.com/NVIDIA/cuQuantum/blob/main/python/samples/cutensornet/tn_algorithms/mps_algorithms.ipynb
|
||||
|
||||
|
||||
class MPSContractionHelper:
    """A helper class to compute various quantities for a given MPS.

    Interleaved format is used to construct the input args for `cuquantum.contract`.

    Reference: https://github.com/NVIDIA/cuQuantum/blob/main/python/samples/cutensornet/tn_algorithms/mps_algorithms.ipynb

    The following compute quantities are supported:

    - the norm of the MPS.
    - the equivalent state vector from the MPS.
    - the expectation value for a given operator.
    - the equivalent state vector after multiplying an MPO to an MPS.

    Parameters:
        num_qubits: The number of qubits for the MPS.
    """

    def __init__(self, num_qubits):
        self.num_qubits = num_qubits
        # Bra tensor i uses modes (left bond, physical, right bond) =
        # (2i, 2i+1, 2i+2); consecutive tensors share a bond mode.
        self.bra_modes = [(2 * i, 2 * i + 1, 2 * i + 2) for i in range(num_qubits)]
        # Ket bond modes are shifted past all bra modes so they never clash;
        # the physical mode 2i+1 is shared with the bra so <psi|psi>-style
        # contractions close over the physical indices.
        offset = 2 * num_qubits + 1
        self.ket_modes = [
            (i + offset, 2 * i + 1, i + 1 + offset) for i in range(num_qubits)
        ]

    def contract_norm(self, mps_tensors, options=None):
        """Contract the corresponding tensor network to form the norm of the
        MPS.

        Parameters:
            mps_tensors: A list of rank-3 ndarray-like tensor objects.
                The indices of the ith tensor are expected to be bonding index to the i-1 tensor,
                the physical mode, and then the bonding index to the i+1th tensor.
            options: Specify the contract and decompose options.

        Returns:
            The norm of the MPS.
        """
        # Pair every site tensor with its conjugate; shared physical modes
        # contract them into <psi|psi>.
        interleaved_inputs = []
        for i, o in enumerate(mps_tensors):
            interleaved_inputs.extend(
                [o, self.bra_modes[i], o.conj(), self.ket_modes[i]]
            )
        interleaved_inputs.append([])  # output: scalar (no free modes)
        return self._contract(interleaved_inputs, options=options).real

    def contract_state_vector(self, mps_tensors, options=None):
        """Contract the corresponding tensor network to form the state vector
        representation of the MPS.

        Parameters:
            mps_tensors: A list of rank-3 ndarray-like tensor objects.
                The indices of the ith tensor are expected to be bonding index to the i-1 tensor,
                the physical mode, and then the bonding index to the i+1th tensor.
            options: Specify the contract and decompose options.

        Returns:
            An ndarray-like object as the state vector.
        """
        interleaved_inputs = []
        for i, o in enumerate(mps_tensors):
            interleaved_inputs.extend([o, self.bra_modes[i]])
        # Keep every physical mode free so the result is the full state vector.
        output_modes = tuple([bra_modes[1] for bra_modes in self.bra_modes])
        interleaved_inputs.append(output_modes)  # output
        return self._contract(interleaved_inputs, options=options)

    def contract_expectation(
        self, mps_tensors, operator, qubits, options=None, normalize=False
    ):
        """Contract the corresponding tensor network to form the expectation of
        the MPS.

        Parameters:
            mps_tensors: A list of rank-3 ndarray-like tensor objects.
                The indices of the ith tensor are expected to be bonding index to the i-1 tensor,
                the physical mode, and then the bonding index to the i+1th tensor.
            operator: A ndarray-like tensor object.
                The modes of the operator are expected to be output qubits followed by input qubits, e.g,
                ``A, B, a, b`` where `a, b` denotes the inputs and `A, B` denotes the outputs.
            qubits: A sequence of integers specifying the qubits that the operator is acting on.
            options: Specify the contract and decompose options.
            normalize: Whether to scale the expectation value by the normalization factor.

        Returns:
            An ndarray-like object holding the expectation value
            (divided by the MPS norm when ``normalize`` is True).
        """

        interleaved_inputs = []
        # Fresh mode labels, beyond every bra/ket mode, for the operator's
        # output legs on the acted-on qubits.
        extra_mode = 3 * self.num_qubits + 2
        # Operator modes: len(qubits) output slots (filled below) followed by
        # the input physical modes of the acted-on qubits.
        operator_modes = [None] * len(qubits) + [self.bra_modes[q][1] for q in qubits]
        qubits = list(qubits)
        for i, o in enumerate(mps_tensors):
            interleaved_inputs.extend([o, self.bra_modes[i]])
            k_modes = self.ket_modes[i]
            if i in qubits:
                # Re-route this site's ket physical mode through the operator
                # instead of contracting it directly with the bra.
                k_modes = (k_modes[0], extra_mode, k_modes[2])
                q = qubits.index(i)
                operator_modes[q] = extra_mode  # output modes
                extra_mode += 1
            interleaved_inputs.extend([o.conj(), k_modes])
        interleaved_inputs.extend([operator, tuple(operator_modes)])
        interleaved_inputs.append([])  # output: scalar (no free modes)
        if normalize:
            norm = self.contract_norm(mps_tensors, options=options)
        else:
            norm = 1
        return self._contract(interleaved_inputs, options=options) / norm

    def _contract(self, interleaved_inputs, options=None):
        # Find a contraction path first, then contract along it.
        path = contract_path(*interleaved_inputs, options=options)[0]

        return contract(*interleaved_inputs, options=options, optimize={"path": path})
|
||||
95
src/qibotn/mps_utils.py
Normal file
95
src/qibotn/mps_utils.py
Normal file
@@ -0,0 +1,95 @@
|
||||
import cupy as cp
|
||||
from cuquantum.tensornet import contract
|
||||
from cuquantum.tensornet.experimental import contract_decompose
|
||||
|
||||
|
||||
def initial(num_qubits, dtype):
    r"""Generate the MPS with an initial state of :math:`\ket{00...00}`

    Parameters:
        num_qubits: Number of qubits in the Quantum Circuit.
        dtype: Either single ("complex64") or double (complex128) precision.

    Returns:
        The initial MPS tensors, one rank-3 (1, 2, 1) tensor per qubit.
    """
    # Build an independent tensor per site. The previous
    # `[state_tensor] * num_qubits` replicated one array object, so every
    # site aliased the same buffer — unsafe if any step ever mutates a site
    # tensor in place.
    return [
        cp.asarray([1, 0], dtype=dtype).reshape(1, 2, 1) for _ in range(num_qubits)
    ]
|
||||
|
||||
|
||||
def mps_site_right_swap(mps_tensors, i, **kwargs):
    """Perform the swap operation between the ith and i+1th MPS tensors.

    Parameters:
        mps_tensors: Tensors representing MPS
        i (int): index of the tensor to swap

    Returns:
        The updated MPS tensors (the list is modified in place as well).
    """
    # Swapping the physical modes (p <-> q) of two neighbouring sites is a
    # contraction immediately followed by a decomposition.
    algorithm = kwargs.get("algorithm", None)
    options = kwargs.get("options", None)
    left, _, right = contract_decompose(
        "ipj,jqk->iqj,jpk",
        *mps_tensors[i : i + 2],
        algorithm=algorithm,
        options=options,
    )
    mps_tensors[i : i + 2] = (left, right)
    return mps_tensors
|
||||
|
||||
|
||||
def apply_gate(mps_tensors, gate, qubits, **kwargs):
    """Apply the gate operand to the MPS tensors in-place.

    # Reference: https://github.com/NVIDIA/cuQuantum/blob/main/python/samples/cutensornet/tn_algorithms/mps_algorithms.ipynb

    Parameters:
        mps_tensors: A list of rank-3 ndarray-like tensor objects.
            The indices of the ith tensor are expected to be the bonding index to the i-1 tensor,
            the physical mode, and then the bonding index to the i+1th tensor.
        gate: A ndarray-like tensor object representing the gate operand.
            The modes of the gate is expected to be output qubits followed by input qubits, e.g,
            ``A, B, a, b`` where ``a, b`` denotes the inputs and ``A, B`` denotes the outputs.
        qubits: A sequence of integers denoting the qubits that the gate is applied onto.
        algorithm: The contract and decompose algorithm to use for gate application.
            Can be either a `dict` or a `ContractDecomposeAlgorithm`.
        options: Specify the contract and decompose options.

    Returns:
        The updated MPS tensors (``None`` for most in-place branches; only the
        i > j recursion propagates a return value).

    Raises:
        NotImplementedError: If more than two qubits are given.
    """

    n_qubits = len(qubits)
    if n_qubits == 1:
        # single-qubit gate: contract the gate into the site's physical mode
        i = qubits[0]
        mps_tensors[i] = contract(
            "ipj,qp->iqj", mps_tensors[i], gate, options=kwargs.get("options", None)
        )  # in-place update
    elif n_qubits == 2:
        # two-qubit gate
        i, j = qubits
        if i > j:
            # swap qubits order; transposing (1, 0, 3, 2) exchanges the two
            # output modes and the two input modes of the gate to match
            mps_tensors[i] = contract  # <- (unreachable placeholder? no)
            return apply_gate(mps_tensors, gate.transpose(1, 0, 3, 2), (j, i), **kwargs)
        elif i + 1 == j:
            # two adjacent qubits: contract both sites with the gate, then
            # split the result back into two site tensors
            a, _, b = contract_decompose(
                "ipj,jqk,rspq->irj,jsk",
                *mps_tensors[i : i + 2],
                gate,
                algorithm=kwargs.get("algorithm", None),
                options=kwargs.get("options", None),
            )
            mps_tensors[i : i + 2] = (a, b)  # in-place update
        else:
            # non-adjacent two-qubit gate
            # step 1: swap i with i+1
            mps_site_right_swap(mps_tensors, i, **kwargs)
            # step 2: apply gate to (i+1, j) pair. This amounts to a recursive swap until the two qubits are adjacent
            apply_gate(mps_tensors, gate, (i + 1, j), **kwargs)
            # step 3: swap back i and i+1
            mps_site_right_swap(mps_tensors, i, **kwargs)
    else:
        raise NotImplementedError("Only one- and two-qubit gates supported")
|
||||
66
src/qibotn/result.py
Normal file
66
src/qibotn/result.py
Normal file
@@ -0,0 +1,66 @@
|
||||
from copy import deepcopy
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
from numpy import ndarray
|
||||
from qibo.config import raise_error
|
||||
|
||||
from qibotn.backends.abstract import QibotnBackend
|
||||
|
||||
|
||||
@dataclass
class TensorNetworkResult:
    """
    Object to store and process the output of a Tensor Network simulation of a quantum circuit.

    Args:
        nqubits (int): number of qubits involved in the simulation;
        backend (QibotnBackend): specific backend on which the simulation has been performed;
        measures (dict): measures (if performed) during the tensor network simulation;
        measured_probabilities (Union[dict, ndarray]): probabilities of the final state
            according to the simulation;
        prob_type (str): string identifying the method used to compute the probabilities.
            Especially useful in case the `QmatchateaBackend` is selected.
        statevector (ndarray): if computed, the reconstructed statevector.
    """

    nqubits: int
    backend: QibotnBackend
    measures: dict
    measured_probabilities: Union[dict, ndarray]
    prob_type: str
    statevector: ndarray

    def __post_init__(self):
        # TODO: define the general convention when using backends different from qmatchatea
        if self.measured_probabilities is None:
            self.measured_probabilities = {"default": self.measured_probabilities}

    def probabilities(self):
        """Return calculated probabilities according to the given method."""
        if self.prob_type == "U":
            # For unbiased ("U") sampling each entry is an interval; the
            # probability is its width. Work on a copy so the stored
            # intervals are not clobbered.
            measured_probabilities = deepcopy(self.measured_probabilities)
            for bitstring, prob in self.measured_probabilities[self.prob_type].items():
                measured_probabilities[self.prob_type][bitstring] = prob[1] - prob[0]
            probabilities = measured_probabilities[self.prob_type]
        else:
            probabilities = self.measured_probabilities
        return probabilities

    def frequencies(self):
        """Return frequencies if a certain number of shots has been set."""
        if self.measures is None:
            # Plain string: the previous f-string had no placeholders.
            raise_error(
                ValueError,
                "To access frequencies, circuit has to be executed with a given number of shots != None",
            )
        return self.measures

    def state(self):
        """Return the statevector if the number of qubits is less than 20."""
        if self.nqubits < 20:
            return self.statevector
        # Message completed: it previously read "... for >= 20 ." with the
        # unit missing, and carried a pointless f prefix.
        raise_error(
            NotImplementedError,
            "Tensor network simulation cannot be used to reconstruct statevector for >= 20 qubits.",
        )
|
||||
12
tests/config.py
Normal file
12
tests/config.py
Normal file
@@ -0,0 +1,12 @@
|
||||
from dataclasses import dataclass
|
||||
from typing import Optional
|
||||
|
||||
|
||||
@dataclass
class Executor:
    """Backend/platform identifier pair used to select an execution target in tests."""

    backend: str  # backend name, e.g. "qibojit" or "numpy"
    platform: Optional[str] = None  # optional platform for the backend


# Reference executors used by the test suite.
qibo = Executor(backend="qibojit", platform="numpy")
quimb = Executor(backend="numpy")
|
||||
66
tests/conftest.py
Normal file
66
tests/conftest.py
Normal file
@@ -0,0 +1,66 @@
|
||||
"""conftest.py.
|
||||
|
||||
Pytest fixtures.
|
||||
"""
|
||||
|
||||
import sys
|
||||
|
||||
import pytest
|
||||
|
||||
# Backends to be tested; only the ones importable in the current
# environment end up in AVAILABLE_BACKENDS below.
# TODO: add cutensornet and quimb here as well
BACKENDS = ["cutensornet"]
# BACKENDS = ["qmatchatea"]
|
||||
|
||||
|
||||
def get_backend(backend_name):
    """Instantiate the qibotn backend registered under ``backend_name``."""
    # Imports are local so that an unavailable backend raises here (and is
    # caught by the availability probe) rather than at module import time.
    from qibotn.backends.cutensornet import CuTensorNet
    from qibotn.backends.qmatchatea import QMatchaTeaBackend

    name_to_backend = {"qmatchatea": QMatchaTeaBackend, "cutensornet": CuTensorNet}
    return name_to_backend[backend_name]()
|
||||
|
||||
|
||||
# Probe which of the requested backends can actually be constructed in this
# environment; only those are parametrized into the tests.
AVAILABLE_BACKENDS = []
for backend_name in BACKENDS:
    try:
        # Construction succeeding is the availability check; the instance
        # itself is not needed (the unused `_backend` local was removed).
        get_backend(backend_name)
        AVAILABLE_BACKENDS.append(backend_name)
    except (ModuleNotFoundError, ImportError):
        pass
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
    """Skip tests marked for platforms other than the current one."""
    # NOTE(review): this hook is defined twice in this module; the later,
    # identical definition shadows this one. One copy should be removed.
    ALL = {"darwin", "linux"}
    # Platform markers present on the test, if any.
    supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
    plat = sys.platform
    if supported_platforms and plat not in supported_platforms:  # pragma: no cover
        # case not covered by workflows
        pytest.skip(f"Cannot run test on platform {plat}.")
|
||||
|
||||
|
||||
@pytest.fixture
def backend(backend_name):
    """Provide a freshly constructed backend instance to each test."""
    instance = get_backend(backend_name)
    yield instance
|
||||
|
||||
|
||||
def pytest_runtest_setup(item):
    """Skip tests marked for platforms other than the current one."""
    # NOTE(review): this is an exact duplicate of an earlier definition of
    # the same hook in this module; one copy should be removed.
    ALL = {"darwin", "linux"}
    # Platform markers present on the test, if any.
    supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
    plat = sys.platform
    if supported_platforms and plat not in supported_platforms:  # pragma: no cover
        # case not covered by workflows
        pytest.skip(f"Cannot run test on platform {plat}.")
|
||||
|
||||
|
||||
def pytest_configure(config):
    """Register the custom platform marker used by pytest_runtest_setup."""
    config.addinivalue_line("markers", "linux: mark test to run only on linux")
|
||||
|
||||
|
||||
def pytest_generate_tests(metafunc):
    """Parametrize every test requesting ``backend_name`` over the available backends."""
    # The unused `module_name` local was removed.
    if "backend_name" in metafunc.fixturenames:
        metafunc.parametrize("backend_name", AVAILABLE_BACKENDS)
|
||||
91
tests/test_circuit_execution.py
Normal file
91
tests/test_circuit_execution.py
Normal file
@@ -0,0 +1,91 @@
|
||||
import math
|
||||
|
||||
import pytest
|
||||
from qibo import Circuit, gates, hamiltonians
|
||||
from qibo.symbols import X, Z
|
||||
|
||||
from qibotn.backends.qmatchatea import QMatchaTeaBackend
|
||||
|
||||
|
||||
def build_observable(nqubits):
    """Helper function to construct a target observable."""
    # Sum of 0.5 * X_i Z_{i+1} terms over a ring of qubits.
    hamiltonian_form = sum(
        0.5 * X(i) * Z((i + 1) % nqubits) for i in range(nqubits)
    )
    hamiltonian = hamiltonians.SymbolicHamiltonian(form=hamiltonian_form)
    return hamiltonian, hamiltonian_form
|
||||
|
||||
|
||||
def build_GHZ(nqubits):
    """Helper function to construct a GHZ-state preparation circuit."""
    circ = Circuit(nqubits)
    circ.add(gates.H(0))
    # A CNOT ladder spreads the superposition to every qubit. A plain loop
    # replaces the list comprehension that was used only for its side effects.
    for q in range(nqubits - 1):
        circ.add(gates.CNOT(q, q + 1))
    return circ
|
||||
|
||||
|
||||
def construct_targets(nqubits):
    """Construct the all-ones and all-zeros bitstrings of length ``nqubits``."""
    return "1" * nqubits, "0" * nqubits
|
||||
|
||||
|
||||
@pytest.mark.parametrize("nqubits", [2, 10, 40])
def test_probabilities(backend, nqubits):
    """Check GHZ probabilities for the qmatchatea sampling modes.

    For a GHZ state only two basis states carry probability, 0.5 each.

    Fix: the original discarded the return values of ``math.isclose``, so
    this test could never fail; every comparison is now asserted.
    """
    circ = build_GHZ(nqubits=nqubits)

    if isinstance(backend, QMatchaTeaBackend):
        # unbiased prob
        out_u = backend.execute_circuit(
            circuit=circ,
            prob_type="U",
            num_samples=1000,
        ).probabilities()

        assert math.isclose(out_u[0], 0.5, abs_tol=1e-7)
        assert math.isclose(out_u[1], 0.5, abs_tol=1e-7)

        # greedy prob, threshold 1.0 keeps all states
        out_g = backend.execute_circuit(
            circuit=circ,
            prob_type="G",
            prob_threshold=1.0,
        ).probabilities()

        assert math.isclose(out_g[0], 0.5, abs_tol=1e-7)
        assert math.isclose(out_g[1], 0.5, abs_tol=1e-7)

        # even-descent prob
        out_e = backend.execute_circuit(
            circuit=circ,
            prob_type="E",
            prob_threshold=0.2,
        ).probabilities()

        assert math.isclose(out_e[0], 0.5, abs_tol=1e-7)
        assert math.isclose(out_e[1], 0.5, abs_tol=1e-7)
|
||||
|
||||
|
||||
@pytest.mark.parametrize("nqubits", [2, 10, 40])
@pytest.mark.parametrize("nshots", [100, 1000])
def test_shots(backend, nqubits, nshots):
    """Sampled GHZ frequencies must stay within 3 sigma of the binomial mean."""
    circ = build_GHZ(nqubits=nqubits)
    ones, zeros = construct_targets(nqubits)

    # For p = 0.5, sigma = sqrt(nshots * 0.5 * 0.5) = sqrt(nshots)/2.
    sigma_threshold = 3 * (math.sqrt(nshots) / 2)

    frequencies = backend.execute_circuit(circ, nshots=nshots).frequencies()

    # Check that the counts for both outcomes are within the 3-sigma threshold of nshots/2.
    for bitstring in (ones, zeros):
        counts = frequencies.get(bitstring, 0)
        assert (
            abs(counts - (nshots / 2)) < sigma_threshold
        ), f"Count for {bitstring} deviates too much: {counts} vs expected {nshots/2}"
|
||||
# ===== tests/test_cuquantum_cutensor_backend.py (new file, 199 lines) =====
|
||||
import math
|
||||
|
||||
import cupy as cp
|
||||
import pytest
|
||||
import qibo
|
||||
from qibo import construct_backend, hamiltonians
|
||||
from qibo.models import QFT
|
||||
from qibo.symbols import X, Z
|
||||
|
||||
ABS_TOL = 1e-7
|
||||
|
||||
|
||||
def qibo_qft(nqubits, swaps):
    """Run qibo's QFT model and return the circuit together with its dense state."""
    circuit = QFT(nqubits, swaps)
    dense_state = circuit().state(numpy=True)
    return circuit, dense_state
|
||||
|
||||
|
||||
def build_observable(nqubits):
    """Construct the target observable sum_i 0.5 * X_i * Z_{(i+1) mod n}.

    Returns the ``SymbolicHamiltonian`` and its raw symbolic form.
    """
    hamiltonian_form = 0
    for site in range(nqubits):
        hamiltonian_form = hamiltonian_form + 0.5 * X(site % nqubits) * Z(
            (site + 1) % nqubits
        )
    return hamiltonians.SymbolicHamiltonian(form=hamiltonian_form), hamiltonian_form
|
||||
|
||||
|
||||
def build_observable_dict(nqubits):
    """Construct a target observable as a dictionary representation.

    Returns a dictionary suitable for `create_hamiltonian_from_dict`.
    """
    return {
        "terms": [
            {
                "coefficient": 0.5,
                "operators": [("X", i % nqubits), ("Z", (i + 1) % nqubits)],
            }
            for i in range(nqubits)
        ]
    }
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.parametrize("nqubits", [1, 2, 5, 10])
def test_eval(nqubits: int, dtype="complex128"):
    """
    Compare the cutensornet dense-vector simulation of a QFT circuit
    against qibo's numpy state vector, using both the default settings
    and an explicitly specified configuration.

    Args:
        nqubits (int): Total number of qubits in the system.
        dtype (str): The data type for precision, 'complex64' for single,
            'complex128' for double.
    """
    # Exact reference state vector from qibo's numpy backend.
    qibo.set_backend(backend="numpy")
    qibo_circ, result_sv = qibo_qft(nqubits, swaps=True)
    result_sv_cp = cp.asarray(result_sv)

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")
    # Test with no settings specified. Default is dense vector calculation without MPI or NCCL.
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    print(
        f"State vector difference: {abs(result_tn.statevector.flatten() - result_sv_cp).max():0.3e}"
    )
    assert cp.allclose(
        result_sv_cp, result_tn.statevector.flatten()
    ), "Resulting dense vectors do not match"

    # Test with explicit settings specified.
    comp_set_w_bool = {
        "MPI_enabled": False,
        "MPS_enabled": False,
        "NCCL_enabled": False,
        "expectation_enabled": False,
    }
    backend.configure_tn_simulation(comp_set_w_bool)
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    print(
        f"State vector difference: {abs(result_tn.statevector.flatten() - result_sv_cp).max():0.3e}"
    )
    assert cp.allclose(
        result_sv_cp, result_tn.statevector.flatten()
    ), "Resulting dense vectors do not match"
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.parametrize("nqubits", [2, 5, 10])
def test_mps(nqubits: int, dtype="complex128"):
    """Evaluate MPS with cuQuantum.

    Runs the QFT circuit through the cutensornet backend in MPS mode and
    compares the resulting dense vector with qibo's numpy state vector,
    first with the default MPS parameters and then with explicit SVD options.

    Args:
        nqubits (int): Total number of qubits in the system.
        dtype (str): The data type for precision, 'complex64' for single,
            'complex128' for double.
    """

    # Exact reference state vector from qibo's numpy backend.
    qibo.set_backend(backend="numpy")
    qibo_circ, result_sv = qibo_qft(nqubits, swaps=True)
    result_sv_cp = cp.asarray(result_sv)

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")
    # Test with simple MPS settings specified using bool. Uses the default MPS parameters.
    comp_set_w_bool = {
        "MPI_enabled": False,
        "MPS_enabled": True,
        "NCCL_enabled": False,
        "expectation_enabled": False,
    }
    backend.configure_tn_simulation(comp_set_w_bool)
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    print(
        f"State vector difference: {abs(result_tn.statevector.flatten() - result_sv_cp).max():0.3e}"
    )
    assert cp.allclose(
        result_tn.statevector.flatten(), result_sv_cp
    ), "Resulting dense vectors do not match"

    # Test with explicit MPS computation settings specified using Dict. Users able to specify parameters like qr_method etc.
    comp_set_w_MPS_config_para = {
        "MPI_enabled": False,
        "MPS_enabled": {
            "qr_method": False,
            "svd_method": {
                "partition": "UV",
                "abs_cutoff": 1e-12,
            },
        },
        "NCCL_enabled": False,
        "expectation_enabled": False,
    }
    backend.configure_tn_simulation(comp_set_w_MPS_config_para)
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    print(
        f"State vector difference: {abs(result_tn.statevector.flatten() - result_sv_cp).max():0.3e}"
    )
    assert cp.allclose(
        result_tn.statevector.flatten(), result_sv_cp
    ), "Resulting dense vectors do not match"
|
||||
|
||||
|
||||
@pytest.mark.parametrize("nqubits", [2, 5, 10])
def test_expectation(nqubits: int, dtype="complex128"):
    """Expectation value on cutensornet matches the exact numpy result.

    The same comparison is run for the three supported ways of enabling the
    expectation computation: ``True`` (default Hamiltonian), a
    ``SymbolicHamiltonian`` object, and a dictionary description.

    Fix: the configure/execute/assert sequence was triplicated verbatim;
    it is now a single loop over the three ``expectation_enabled`` values
    (same call order, same behavior).
    """
    # Exact reference from qibo's numpy backend.
    qibo_circ, state_vec_qibo = qibo_qft(nqubits, swaps=True)
    ham, ham_form = build_observable(nqubits)
    numpy_backend = construct_backend("numpy")
    exact_expval = numpy_backend.calculate_expectation_state(
        hamiltonian=ham,
        state=state_vec_qibo,
        normalize=False,
    )

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")
    ham_dict = build_observable_dict(nqubits)

    # True -> default Hamiltonian; also accept an object or dict observable.
    for expectation_setting in (True, ham, ham_dict):
        backend.configure_tn_simulation(
            {
                "MPI_enabled": False,
                "MPS_enabled": False,
                "NCCL_enabled": False,
                "expectation_enabled": expectation_setting,
            }
        )
        result_tn = backend.execute_circuit(circuit=qibo_circ)
        assert math.isclose(
            exact_expval.item(), result_tn.real.get().item(), abs_tol=ABS_TOL
        )
|
||||
# ===== tests/test_cuquantum_cutensor_mpi_backend.py (new file, 315 lines) =====
|
||||
# mpirun --allow-run-as-root -np 2 python -m pytest --with-mpi test_cuquantum_cutensor_mpi_backend.py
|
||||
|
||||
import math
|
||||
|
||||
import cupy as cp
|
||||
import numpy as np
|
||||
import pytest
|
||||
import qibo
|
||||
from qibo import construct_backend, hamiltonians
|
||||
from qibo.models import QFT
|
||||
from qibo.symbols import X, Z
|
||||
|
||||
ABS_TOL = 1e-7
|
||||
|
||||
|
||||
def qibo_qft(nqubits, swaps):
    """Build qibo's QFT circuit and return it with its dense final state."""
    qft_circuit = QFT(nqubits, swaps)
    final_state = qft_circuit().state(numpy=True)
    return qft_circuit, final_state
|
||||
|
||||
|
||||
def build_observable(nqubits):
    """Construct the periodic observable sum_i 0.5 * X_i * Z_{i+1}.

    Returns the ``SymbolicHamiltonian`` and its raw symbolic form.
    """
    form = 0
    for index in range(nqubits):
        form = form + 0.5 * X(index % nqubits) * Z((index + 1) % nqubits)
    return hamiltonians.SymbolicHamiltonian(form=form), form
|
||||
|
||||
|
||||
def build_observable_dict(nqubits):
    """Construct a target observable as a dictionary representation.

    Returns a dictionary suitable for `create_hamiltonian_from_dict`.
    """
    terms = [
        {
            "coefficient": 0.5,
            "operators": [("X", site % nqubits), ("Z", (site + 1) % nqubits)],
        }
        for site in range(nqubits)
    ]
    return {"terms": terms}
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.mpi
@pytest.mark.parametrize("nqubits", [1, 2, 5, 7, 10])
def test_eval_mpi(nqubits: int, dtype="complex128"):
    """
    Compare the MPI-distributed cutensornet dense-vector simulation of a
    QFT circuit against qibo's numpy state vector. Rank 0 holds the full
    result; every other rank receives a scalar placeholder 0.

    Args:
        nqubits (int): Total number of qubits in the system.
        dtype (str): The data type for precision, 'complex64' for single,
            'complex128' for double.
    """
    # Exact reference state vector from qibo's numpy backend.
    qibo.set_backend(backend="numpy")
    qibo_circ, result_sv = qibo_qft(nqubits, swaps=True)
    result_sv_cp = cp.asarray(result_sv)

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")

    # Test with explicit settings specified (MPI on, MPS/NCCL/expectation off).
    comp_set_w_bool = {
        "MPI_enabled": True,
        "MPS_enabled": False,
        "NCCL_enabled": False,
        "expectation_enabled": False,
    }
    backend.configure_tn_simulation(comp_set_w_bool)
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    result_tn_cp = cp.asarray(result_tn.statevector.flatten())

    print(f"State vector difference: {abs(result_tn_cp - result_sv_cp).max():0.3e}")

    if backend.rank == 0:
        # Root rank carries the actual dense state vector.
        assert cp.allclose(
            result_sv_cp, result_tn_cp
        ), "Resulting dense vectors do not match"
    else:
        # Non-root ranks get a single-element placeholder of value 0.
        assert (
            isinstance(result_tn_cp, cp.ndarray)
            and result_tn_cp.size == 1
            and result_tn_cp.item() == 0
        ), f"Rank {backend.rank}: result_tn_cp should be scalar/array with 0, got {result_tn_cp}"
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.mpi
@pytest.mark.parametrize("nqubits", [1, 2, 5, 7, 10])
def test_expectation_mpi(nqubits: int, dtype="complex128"):
    """MPI expectation value against the exact numpy result.

    The check is repeated for the three supported ways of enabling the
    expectation computation: ``True`` (default Hamiltonian), a
    ``SymbolicHamiltonian`` object, and a dictionary description.
    Rank 0 compares the numerical value; every other rank must receive
    the hardcoded integer placeholder ``[0]``.

    Fix: the configure/execute/assert sequence was triplicated verbatim;
    it is now a single loop over the three ``expectation_enabled`` values
    (same call order, same behavior).
    """
    # Exact reference from qibo's numpy backend.
    qibo_circ, state_vec_qibo = qibo_qft(nqubits, swaps=True)
    ham, ham_form = build_observable(nqubits)
    numpy_backend = construct_backend("numpy")
    exact_expval = numpy_backend.calculate_expectation_state(
        hamiltonian=ham,
        state=state_vec_qibo,
        normalize=False,
    )

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")
    ham_dict = build_observable_dict(nqubits)

    # True -> default Hamiltonian; also accept an object or dict observable.
    for expectation_setting in (True, ham, ham_dict):
        backend.configure_tn_simulation(
            {
                "MPI_enabled": True,
                "MPS_enabled": False,
                "NCCL_enabled": False,
                "expectation_enabled": expectation_setting,
            }
        )
        result_tn = backend.execute_circuit(circuit=qibo_circ)
        if backend.rank == 0:
            # Compare numerical values on the root rank.
            assert math.isclose(
                exact_expval.item(), float(result_tn[0]), abs_tol=ABS_TOL
            ), f"Rank {backend.rank}: mismatch, expected {exact_expval}, got {result_tn}"
        else:
            # Rank > 0: must be hardcoded [0] (int)
            assert (
                isinstance(result_tn, (np.ndarray, cp.ndarray))
                and result_tn.size == 1
                and np.issubdtype(result_tn.dtype, np.integer)
                and result_tn.item() == 0
            ), f"Rank {backend.rank}: expected int array [0], got {result_tn}"
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.mpi
@pytest.mark.parametrize("nqubits", [1, 2, 5, 7, 10])
def test_eval_nccl(nqubits: int, dtype="complex128"):
    """
    Compare the NCCL-distributed cutensornet dense-vector simulation of a
    QFT circuit against qibo's numpy state vector. Rank 0 holds the full
    result; every other rank receives a scalar placeholder 0.

    Args:
        nqubits (int): Total number of qubits in the system.
        dtype (str): The data type for precision, 'complex64' for single,
            'complex128' for double.
    """
    # Exact reference state vector from qibo's numpy backend.
    qibo.set_backend(backend="numpy")
    qibo_circ, result_sv = qibo_qft(nqubits, swaps=True)
    result_sv_cp = cp.asarray(result_sv)

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")

    # Test with explicit settings specified (NCCL on, MPI/MPS/expectation off).
    comp_set_w_bool = {
        "MPI_enabled": False,
        "MPS_enabled": False,
        "NCCL_enabled": True,
        "expectation_enabled": False,
    }
    backend.configure_tn_simulation(comp_set_w_bool)
    result_tn = backend.execute_circuit(circuit=qibo_circ)
    result_tn_cp = cp.asarray(result_tn.statevector.flatten())

    if backend.rank == 0:
        # Root rank carries the actual dense state vector.
        assert cp.allclose(
            result_sv_cp, result_tn_cp
        ), "Resulting dense vectors do not match"
    else:
        # Non-root ranks get a single-element placeholder of value 0.
        assert (
            isinstance(result_tn_cp, cp.ndarray)
            and result_tn_cp.size == 1
            and result_tn_cp.item() == 0
        ), f"Rank {backend.rank}: result_tn_cp should be scalar/array with 0, got {result_tn_cp}"
|
||||
|
||||
|
||||
@pytest.mark.gpu
@pytest.mark.mpi
@pytest.mark.parametrize("nqubits", [1, 2, 5, 7, 10])
def test_expectation_NCCL(nqubits: int, dtype="complex128"):
    """NCCL expectation value against the exact numpy result.

    The check is repeated for the three supported ways of enabling the
    expectation computation: ``True`` (default Hamiltonian), a
    ``SymbolicHamiltonian`` object, and a dictionary description.
    Rank 0 compares the numerical value; every other rank must receive
    the hardcoded integer placeholder ``[0]``.

    Fix: the configure/execute/assert sequence was triplicated verbatim;
    it is now a single loop over the three ``expectation_enabled`` values
    (same call order, same behavior).
    """
    # Exact reference from qibo's numpy backend.
    qibo_circ, state_vec_qibo = qibo_qft(nqubits, swaps=True)
    ham, ham_form = build_observable(nqubits)
    numpy_backend = construct_backend("numpy")
    exact_expval = numpy_backend.calculate_expectation_state(
        hamiltonian=ham,
        state=state_vec_qibo,
        normalize=False,
    )

    # Test cutensornet
    backend = construct_backend(backend="qibotn", platform="cutensornet")
    ham_dict = build_observable_dict(nqubits)

    # True -> default Hamiltonian; also accept an object or dict observable.
    for expectation_setting in (True, ham, ham_dict):
        backend.configure_tn_simulation(
            {
                "MPI_enabled": False,
                "MPS_enabled": False,
                "NCCL_enabled": True,
                "expectation_enabled": expectation_setting,
            }
        )
        result_tn = backend.execute_circuit(circuit=qibo_circ)
        if backend.rank == 0:
            # Compare numerical values on the root rank.
            assert math.isclose(
                exact_expval.item(), float(result_tn[0]), abs_tol=ABS_TOL
            ), f"Rank {backend.rank}: mismatch, expected {exact_expval}, got {result_tn}"
        else:
            # Rank > 0: must be hardcoded [0] (int)
            assert (
                isinstance(result_tn, (np.ndarray, cp.ndarray))
                and result_tn.size == 1
                and np.issubdtype(result_tn.dtype, np.integer)
                and result_tn.item() == 0
            ), f"Rank {backend.rank}: expected int array [0], got {result_tn}"
|
||||
# ===== tests/test_expectation.py (new file, 47 lines) =====
|
||||
import math
|
||||
import random
|
||||
|
||||
import pytest
|
||||
from qibo import Circuit, construct_backend, gates, hamiltonians
|
||||
from qibo.symbols import X, Z
|
||||
|
||||
|
||||
def build_observable(nqubits):
    """Build the periodic observable sum_i 0.5 * X_i * Z_{i+1} and its form."""
    symbolic_form = 0
    for q in range(nqubits):
        symbolic_form = symbolic_form + 0.5 * X(q % nqubits) * Z((q + 1) % nqubits)
    return hamiltonians.SymbolicHamiltonian(form=symbolic_form), symbolic_form
|
||||
|
||||
|
||||
def build_circuit(nqubits, nlayers, seed=42):
    """Helper function to construct a layered quantum circuit.

    Each layer applies seeded random RY and RZ rotations on every qubit,
    followed by a ring of CNOTs; measurements are appended at the end.

    Fix: the CNOT line wrapped ``circ.add(gates.CNOT(...) for q in ...)`` in
    a throwaway one-element list, passing a *generator* to a single ``add``
    call (it worked only because qibo's ``Circuit.add`` accepts iterables);
    replaced by an explicit loop that adds each gate directly.
    """
    random.seed(seed)

    circ = Circuit(nqubits)
    for _ in range(nlayers):
        for q in range(nqubits):
            circ.add(gates.RY(q=q, theta=random.uniform(-math.pi, math.pi)))
            circ.add(gates.RZ(q=q, theta=random.uniform(-math.pi, math.pi)))
        # Entangling ring, closing with CNOT(nqubits-1, 0).
        for q in range(nqubits):
            circ.add(gates.CNOT(q % nqubits, (q + 1) % nqubits))
    circ.add(gates.M(*range(nqubits)))
    return circ
|
||||
|
||||
|
||||
@pytest.mark.parametrize("nqubits", [2, 5, 10])
def test_observable_expval(backend, nqubits):
    """Tensor-network expectation value matches the exact numpy one."""
    reference_backend = construct_backend("numpy")
    ham, ham_form = build_observable(nqubits)
    circ = build_circuit(nqubits=nqubits, nlayers=1)

    # Exact value from a dense state-vector simulation.
    exact_expval = reference_backend.calculate_expectation_state(
        hamiltonian=ham,
        state=circ().state(),
        normalize=False,
    )

    tn_expval = backend.expectation(circuit=circ, observable=ham_form)

    assert math.isclose(exact_expval, tn_expval, abs_tol=1e-7)
|
||||
# ===== tests/test_quimb_backend.py (new file, 66 lines) =====
|
||||
import copy
|
||||
import os
|
||||
|
||||
import config
|
||||
import numpy as np
|
||||
import pytest
|
||||
import qibo
|
||||
from qibo.models import QFT
|
||||
|
||||
|
||||
def create_init_state(nqubits):
    """Return a random complex state vector of dimension ``2**nqubits``, norm 1."""
    dim = 2**nqubits
    amplitudes = np.random.random(dim) + 1j * np.random.random(dim)
    normalization = np.sqrt((np.abs(amplitudes) ** 2).sum())
    return amplitudes / normalization
|
||||
|
||||
|
||||
def qibo_qft(nqubits, init_state, swaps):
    """Run qibo's QFT on ``init_state`` and return the circuit and final dense state."""
    qft_circuit = QFT(nqubits, swaps)
    final_state = qft_circuit(init_state).state(numpy=True)
    return qft_circuit, final_state
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
    "nqubits, tolerance, is_mps",
    [(1, 1e-6, True), (2, 1e-6, False), (5, 1e-3, True), (10, 1e-3, False)],
)
def test_eval(nqubits: int, tolerance: float, is_mps: bool):
    """Evaluate circuit with Quimb backend.

    Runs the QFT circuit on a random initial state via quimb and compares
    the dense result with qibo's simulation of the same circuit.

    Args:
        nqubits (int): Total number of qubits in the system.
        tolerance (float): Maximum limit allowed for difference in results.
        is_mps (bool): True if state is MPS and False for tensor network structure.
    """
    # hack quimb to use the correct number of processes
    # TODO: remove completely, or at least delegate to the backend
    # implementation
    os.environ["QUIMB_NUM_PROCS"] = str(os.cpu_count())
    # Imported here so the env var above takes effect before quimb loads.
    import qibotn.eval_qu

    init_state = create_init_state(nqubits=nqubits)
    # Deep-copied so qibo's run cannot mutate the copy handed to quimb.
    init_state_tn = copy.deepcopy(init_state)

    # Test qibo
    qibo.set_backend(backend=config.qibo.backend, platform=config.qibo.platform)

    qibo_circ, result_sv = qibo_qft(nqubits, init_state, swaps=True)

    # Convert to qasm for other backends
    qasm_circ = qibo_circ.to_qasm()

    # Test quimb: MPS mode needs explicit gate options, TN mode uses defaults.
    if is_mps:
        gate_opt = {}
        gate_opt["method"] = "svd"
        gate_opt["cutoff"] = 1e-6
        gate_opt["cutoff_mode"] = "abs"
    else:
        gate_opt = None
    result_tn = qibotn.eval_qu.dense_vector_tn_qu(
        qasm_circ, init_state_tn, gate_opt, backend=config.quimb.backend
    ).flatten()

    assert np.allclose(
        result_sv, result_tn, atol=tolerance
    ), "Resulting dense vectors do not match"
|
||||
Reference in New Issue
Block a user