diff --git a/.github/workflows/integration_tests.yml b/.github/workflows/integration_tests.yml index 47cce0eb1e3..508ed712532 100644 --- a/.github/workflows/integration_tests.yml +++ b/.github/workflows/integration_tests.yml @@ -10,9 +10,9 @@ on: workflow_dispatch: inputs: target: - description: 'Target (choose nightly to run like nightly tests)' + description: "Target (choose nightly to run like nightly tests)" required: true - default: 'nightly' + default: "nightly" type: choice options: - nightly @@ -28,41 +28,42 @@ on: - quantinuum - scaleway - tii + - qbraid single_test_name: type: string required: false - description: 'Single test (e.g., targettests/quantinuum/load_value.cpp). Runs default tests if left blank' + description: "Single test (e.g., targettests/quantinuum/load_value.cpp). Runs default tests if left blank" target_machine: type: string required: false - description: 'Target machine (e.g., H2-1E).' + description: "Target machine (e.g., H2-1E)." cudaq_test_image: type: string required: false - default: '' # picked up from repo variable if not provided - description: 'CUDA Quantum image to run the tests in. Default to the latest CUDA Quantum nightly image' + default: "" # picked up from repo variable if not provided + description: "CUDA Quantum image to run the tests in. Default to the latest CUDA Quantum nightly image" commit_sha: type: string required: false - description: 'Commit SHA to pull the code (examples/tests) for testing. Default to the commit associated with the CUDA Quantum docker image if left blank' + description: "Commit SHA to pull the code (examples/tests) for testing. Default to the commit associated with the CUDA Quantum docker image if left blank" workflow_id: type: string required: false - description: 'Workflow Id to retrieve the Python wheel for testing. 
Default to the wheels produced by the Publishing workflow associated with the latest nightly CUDA Quantum Docker image if left blank' + description: "Workflow Id to retrieve the Python wheel for testing. Default to the wheels produced by the Publishing workflow associated with the latest nightly CUDA Quantum Docker image if left blank" python_version: type: choice required: true - description: 'Python version to run wheel test' + description: "Python version to run wheel test" options: - - '3.11' - - '3.12' - - '3.13' + - "3.11" + - "3.12" + - "3.13" schedule: - cron: 0 3 * * * env: - python_version: '3.12' + python_version: "3.12" jobs: # Run a daily check of all links in the docs to find any newly broken links diff --git a/docs/sphinx/targets/cpp/qbraid.cpp b/docs/sphinx/targets/cpp/qbraid.cpp new file mode 100644 index 00000000000..4b696005582 --- /dev/null +++ b/docs/sphinx/targets/cpp/qbraid.cpp @@ -0,0 +1,49 @@ +// Compile and run with: +// ``` +// nvq++ --target qbraid qbraid.cpp -o out.x && ./out.x +// ``` +// This will submit the job to the Qbraid ideal simulator target (default). + + +#include <cudaq.h> +#include <fstream> + +// Define a simple quantum kernel to execute on Qbraid. +struct ghz { + // Maximally entangled state between 5 qubits. + auto operator()() __qpu__ { + cudaq::qvector q(5); + h(q[0]); + for (int i = 0; i < 4; i++) { + x<cudaq::ctrl>(q[i], q[i + 1]); + } + auto result = mz(q); + } +}; + +int main() { + // Submit to Qbraid asynchronously (e.g., continue executing + // code in the file until the job has been returned). + auto future = cudaq::sample_async(ghz{}); + // ... classical code to execute in the meantime ... + + // Can write the future to file: + { + std::ofstream out("saveMe.json"); + out << future; + } + + // Then come back and read it in later. + cudaq::async_result<cudaq::sample_result> readIn; + std::ifstream in("saveMe.json"); + in >> readIn; + + // Get the results of the read in future. 
+ auto async_counts = readIn.get(); + async_counts.dump(); + + // OR: Submit to Qbraid synchronously (e.g., wait for the job + // result to be returned before proceeding). + auto counts = cudaq::sample(ghz{}); + counts.dump(); +} diff --git a/docs/sphinx/targets/python/qbraid.py b/docs/sphinx/targets/python/qbraid.py new file mode 100644 index 00000000000..8450e3a6fd8 --- /dev/null +++ b/docs/sphinx/targets/python/qbraid.py @@ -0,0 +1,52 @@ +import cudaq + +# You only have to set the target once! No need to redefine it +# for every execution call on your kernel. +# To use different targets in the same file, you must update +# it via another call to `cudaq.set_target()` +cudaq.set_target("qbraid") + + +# Create the kernel we'd like to execute on Qbraid. +@cudaq.kernel +def kernel(): + qvector = cudaq.qvector(2) + h(qvector[0]) + x.ctrl(qvector[0], qvector[1]) + + + +# Execute on Qbraid and print out the results. + +# Option A: +# By using the asynchronous `cudaq.sample_async`, the remaining +# classical code will be executed while the job is being handled +# by qBraid. This is ideal when submitting via a queue over +# the cloud. +async_results = cudaq.sample_async(kernel) +# ... more classical code to run ... + +# We can either retrieve the results later in the program with +# ``` +# async_counts = async_results.get() +# ``` +# or we can also write the job reference (`async_results`) to +# a file and load it later or from a different process. +file = open("future.txt", "w") +file.write(str(async_results)) +file.close() + +# We can later read the file content and retrieve the job +# information and results. +same_file = open("future.txt", "r") +retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read())) + +counts = retrieved_async_results.get() +print(counts) + +# Option B: +# By using the synchronous `cudaq.sample`, the execution of +# any remaining classical code in the file will occur only +# after the job has been returned from Qbraid. 
+counts = cudaq.sample(kernel) +print(counts) \ No newline at end of file diff --git a/docs/sphinx/using/backends/cloud.rst b/docs/sphinx/using/backends/cloud.rst index 8c03a4398cc..d2044d64e9e 100644 --- a/docs/sphinx/using/backends/cloud.rst +++ b/docs/sphinx/using/backends/cloud.rst @@ -5,6 +5,7 @@ CUDA-Q provides a number of options to access hardware resources (GPUs and QPUs) .. toctree:: :maxdepth: 1 - + Amazon Braket (braket) Scaleway QaaS (scaleway) + Qbraid diff --git a/docs/sphinx/using/backends/cloud/qbraid.rst b/docs/sphinx/using/backends/cloud/qbraid.rst new file mode 100644 index 00000000000..a7e7fe4a2ae --- /dev/null +++ b/docs/sphinx/using/backends/cloud/qbraid.rst @@ -0,0 +1,61 @@ +QBRAID ++++++++ + +.. _qbraid-backend: + +Setting Credentials +````````````````````````` + +Programmers of CUDA-Q may access the `Qbraid Devices +`__ from either C++ or Python. Generate +an API key from your `Qbraid account `__ and export +it as an environment variable: + +.. code:: bash + + export QBRAID_API_KEY="qbraid_generated_api_key" + + +Submission from Python +````````````````````````` + + First, set the :code:`qbraid` backend. + + .. code:: python + + cudaq.set_target('qbraid') + + By default, quantum kernel code will be submitted to the IonQ simulator on qBraid. + + To emulate the qbraid's simulator locally, without submitting through the cloud, you can also set the ``emulate`` flag to ``True``. This will emit any target specific compiler diagnostics. + + .. code:: python + + cudaq.set_target('qbraid', emulate=True) + + The number of shots for a kernel execution can be set through the ``shots_count`` argument to ``cudaq.sample`` or ``cudaq.observe``. By default, the ``shots_count`` is set to 1000. + + .. code:: python + + cudaq.sample(kernel, shots_count=10000) + + To see a complete example for using Qbraid's backends, take a look at our :doc:`Python examples <../../examples/examples>`. 
+ +Submission from C++ +````````````````````````` + To target quantum kernel code for execution using qBraid, + pass the flag ``--target qbraid`` to the ``nvq++`` compiler. + + .. code:: bash + + nvq++ --target qbraid src.cpp + + This will take the API key and handle all authentication with, and submission to, the Qbraid device. By default, quantum kernel code will be submitted to the Qbraid simulator. + + To emulate the qBraid machine locally, without submitting through the cloud, you can also pass the ``--emulate`` flag to ``nvq++``. This will emit any target specific compiler diagnostics, before running a noise free emulation. + + .. code:: bash + + nvq++ --emulate --target qbraid src.cpp + + To see a complete example for using qBraid's backends, take a look at our :doc:`C++ examples <../../examples/examples>`. diff --git a/docs/sphinx/using/backends/hardware/iontrap.rst b/docs/sphinx/using/backends/hardware/iontrap.rst index 83e25326455..0dc69de2177 100644 --- a/docs/sphinx/using/backends/hardware/iontrap.rst +++ b/docs/sphinx/using/backends/hardware/iontrap.rst @@ -31,7 +31,7 @@ Submitting By default, quantum kernel code will be submitted to the IonQ simulator. - .. note:: + .. note:: A "target" in :code:`cudaq` refers to a quantum compute provider, such as :code:`ionq`. However, IonQ's documentation uses the term "target" to refer to specific QPU's themselves. @@ -70,7 +70,7 @@ Submitting This will take the API key and handle all authentication with, and submission to, the IonQ QPU(s). By default, quantum kernel code will be submitted to the IonQsimulator. - .. note:: + .. note:: A "target" in :code:`cudaq` refers to a quantum compute provider, such as :code:`ionq`. However, IonQ's documentation uses the term "target" to refer to specific QPU's themselves. @@ -105,7 +105,7 @@ Setting Credentials ``````````````````` Programmers of CUDA-Q may access the Quantinuum API from either -C++ or Python. Quantinuum requires a credential configuration file. 
+C++ or Python. Quantinuum requires a credential configuration file. The configuration file can be generated as follows, replacing the ``email`` and ``credentials`` in the first line with your Quantinuum account details. @@ -134,8 +134,8 @@ Create a project in the Nexus portal. You can find the project ID in the URL of .. tab:: Python - - The backend to which quantum kernels are submitted + + The backend to which quantum kernels are submitted can be controlled with the ``cudaq.set_target()`` function. .. code:: python @@ -155,15 +155,15 @@ Create a project in the Nexus portal. You can find the project ID in the URL of cudaq.set_target('quantinuum', machine='H2-2') where ``H2-2`` is an example of a physical QPU. Hardware specific - emulators may be accessed by appending an ``E`` to the end (e.g, ``H2-2E``). For - access to the syntax checker for the provided machine, you may append an ``SC`` + emulators may be accessed by appending an ``E`` to the end (e.g, ``H2-2E``). For + access to the syntax checker for the provided machine, you may append an ``SC`` to the end (e.g, ``H2-1SC``). - For a comprehensive list of available machines, login to your `Quantinuum Nexus user account `__ + For a comprehensive list of available machines, login to your `Quantinuum Nexus user account `__ and navigate to the "Profile" tab, where you should find a table titled "Quantinuum Systems Access". To emulate the Quantinuum machine locally, without submitting through the cloud, - you can set the ``emulate`` flag to ``True``. This will emit any target + you can set the ``emulate`` flag to ``True``. This will emit any target specific compiler warnings and diagnostics, before running a noise free emulation. You do not need to specify project or machine when emulating. @@ -175,7 +175,7 @@ Create a project in the Nexus portal. You can find the project ID in the URL of the ``shots_count`` argument to ``cudaq.sample`` or ``cudaq.observe``. By default, the ``shots_count`` is set to 1000. - .. 
code:: python + .. code:: python cudaq.sample(kernel, shots_count=10000) @@ -183,7 +183,7 @@ Create a project in the Nexus portal. You can find the project ID in the URL of .. tab:: C++ To target quantum kernel code for execution in the Quantinuum backends, - pass the flag ``--target quantinuum`` to the ``nvq++`` compiler. CUDA-Q will + pass the flag ``--target quantinuum`` to the ``nvq++`` compiler. CUDA-Q will authenticate via the Quantinuum REST API using the credential in your configuration file. By default, quantum kernel code will be submitted to the Quantinuum syntax checker. Submission to the syntax checker merely validates the program; the kernels are not executed. @@ -202,15 +202,15 @@ Create a project in the Nexus portal. You can find the project ID in the URL of nvq++ --target quantinuum --quantinuum-machine H2-2 src.cpp ... where ``H2-2`` is an example of a physical QPU. Hardware specific - emulators may be accessed by appending an ``E`` to the end (e.g, ``H2-2E``). For - access to the syntax checker for the provided machine, you may append an ``SC`` + emulators may be accessed by appending an ``E`` to the end (e.g, ``H2-2E``). For + access to the syntax checker for the provided machine, you may append an ``SC`` to the end (e.g, ``H2-1SC``). - For a comprehensive list of available machines, login to your `Quantinuum Nexus user account `__ + For a comprehensive list of available machines, login to your `Quantinuum Nexus user account `__ and navigate to the "Profile" tab, where you should find a table titled "Quantinuum Systems Access". To emulate the Quantinuum machine locally, without submitting through the cloud, - you can pass the ``--emulate`` flag to ``nvq++``. This will emit any target + you can pass the ``--emulate`` flag to ``nvq++``. This will emit any target specific compiler warnings and diagnostics, before running a noise free emulation. You do not need to specify project or machine when emulating. 
@@ -218,15 +218,15 @@ Create a project in the Nexus portal. You can find the project ID in the URL of nvq++ --emulate --target quantinuum src.cpp -.. note:: +.. note:: Quantinuum's syntax checker for Helios (e.g., ``Helios-1SC``) only performs QIR code validation and does not return any results. Thus, it always returns an empty result set. This is different from other Quantinuum backends (e.g., ``H2-1SC``) where the syntax checker returns dummy results. As a result, when using the Helios syntax checker, we may receive this warning message: .. code:: text - - WARNING: this kernel invocation produced 0 shots worth of results when executed. + + WARNING: this kernel invocation produced 0 shots worth of results when executed. It means that the kernel was successfully validated, but no execution results are available. To get results, please submit to the Helios emulator (e.g., ``Helios-1E``) or the actual quantum device (e.g., ``Helios-1``). @@ -235,12 +235,76 @@ Create a project in the Nexus portal. You can find the project ID in the URL of To see a complete example, take a look at :ref:`Quantinuum examples `. -.. note:: +.. note:: In local emulation mode (``emulate`` flag set to ``True``), the program will be executed on the :ref:`default simulator `. - The environment variable ``CUDAQ_DEFAULT_SIMULATOR`` can be used to change the emulation simulator. - + The environment variable ``CUDAQ_DEFAULT_SIMULATOR`` can be used to change the emulation simulator. + For example, the simulation floating point accuracy and/or the simulation capabilities (e.g., maximum number of qubits, supported quantum gates), - depend on the selected simulator. - + depend on the selected simulator. + Any environment variables must be set prior to setting the target or running "`import cudaq`". + +QBRAID ++++++++ + +.. _qbraid-backend: + +Setting Credentials +````````````````````````` + +Programmers of CUDA-Q may access the `Qbraid Devices +`__ from either C++ or Python. 
Generate +an API key from your `Qbraid account `__ and export +it as an environment variable: + +.. code:: bash + + export QBRAID_API_KEY="qbraid_generated_api_key" + + +Submitting +````````````````````````` +.. tab:: Python + + First, set the :code:`qbraid` backend. + + .. code:: python + + cudaq.set_target('qbraid') + + By default, quantum kernel code will be submitted to the IonQ simulator on qBraid. + + To emulate the qBraid simulator locally, without submitting through the cloud, you can also set the ``emulate`` flag to ``True``. This will emit any target specific compiler diagnostics. + + .. code:: python + + cudaq.set_target('qbraid', emulate=True) + + The number of shots for a kernel execution can be set through the ``shots_count`` argument to ``cudaq.sample`` or ``cudaq.observe``. By default, the ``shots_count`` is set to 1000. + + .. code:: python + + cudaq.sample(kernel, shots_count=10000) + + To see a complete example for using qBraid's backends, take a look at our :doc:`Python examples <../../examples/examples>`. + + +.. tab:: C++ + + To target quantum kernel code for execution using qBraid, + pass the flag ``--target qbraid`` to the ``nvq++`` compiler. + + .. code:: bash + + nvq++ --target qbraid src.cpp + + This will take the API key and handle all authentication with, and submission to, the Qbraid device. By default, quantum kernel code will be submitted to the Qbraid simulator. + + To emulate the qBraid machine locally, without submitting through the cloud, you can also pass the ``--emulate`` flag to ``nvq++``. This will emit any target specific compiler diagnostics, before running a noise free emulation. + + .. code:: bash + + nvq++ --emulate --target qbraid src.cpp + + To see a complete example for using qBraid's backends, take a look at our :doc:`C++ examples <../../examples/examples>`. 
diff --git a/lib/Optimizer/CodeGen/Passes.cpp b/lib/Optimizer/CodeGen/Passes.cpp index 4d718e050b3..41f3505432b 100644 --- a/lib/Optimizer/CodeGen/Passes.cpp +++ b/lib/Optimizer/CodeGen/Passes.cpp @@ -110,6 +110,17 @@ static void addFermioniqPipeline(OpPassManager &pm) { pm.addPass(createBasisConversion(options)); } +static void addQbraidPipeline(OpPassManager &pm) { + using namespace cudaq::opt; + std::string basis[] = { + "h", "s", "t", "rx", "ry", "rz", "x", "y", "z", "x(1)", + }; + BasisConversionPassOptions options; + options.basis = basis; + options.disabledPatterns = z_disabledPatterns; + pm.addPass(createBasisConversionPass(options)); +} + void cudaq::opt::registerTargetPipelines() { PassPipelineRegistration<>("anyon-cgate-set-mapping", "Convert kernels to Anyon gate set.", @@ -135,6 +146,9 @@ void cudaq::opt::registerTargetPipelines() { PassPipelineRegistration<>("fermioniq-gate-set-mapping", "Convert kernels to Fermioniq gate set.", addFermioniqPipeline); + PassPipelineRegistration<>("qbraid-gate-set-mapping", + "Convert kernels to qBraid gate set.", + addQbraidPipeline); } void cudaq::opt::registerCodeGenDialect(DialectRegistry ®istry) { diff --git a/runtime/cudaq/platform/default/rest/helpers/CMakeLists.txt b/runtime/cudaq/platform/default/rest/helpers/CMakeLists.txt index 5daa54ea114..4574b6ba8fe 100644 --- a/runtime/cudaq/platform/default/rest/helpers/CMakeLists.txt +++ b/runtime/cudaq/platform/default/rest/helpers/CMakeLists.txt @@ -27,3 +27,6 @@ endif() if(CUDAQ_ENABLE_TII_BACKEND) add_subdirectory(tii) endif() +if(CUDAQ_ENABLE_QBRAID_BACKEND) + add_subdirectory(qbraid) +endif() diff --git a/runtime/cudaq/platform/default/rest/helpers/qbraid/CMakeLists.txt b/runtime/cudaq/platform/default/rest/helpers/qbraid/CMakeLists.txt new file mode 100644 index 00000000000..05b059ecd25 --- /dev/null +++ b/runtime/cudaq/platform/default/rest/helpers/qbraid/CMakeLists.txt @@ -0,0 +1,17 @@ +# 
============================================================================ # +# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # +target_sources(cudaq-rest-qpu PRIVATE QbraidServerHelper.cpp) +add_target_config(qbraid) + +add_library(cudaq-serverhelper-qbraid SHARED QbraidServerHelper.cpp ) +target_link_libraries(cudaq-serverhelper-qbraid + PUBLIC + cudaq-common + fmt::fmt-header-only +) +install(TARGETS cudaq-serverhelper-qbraid DESTINATION lib) \ No newline at end of file diff --git a/runtime/cudaq/platform/default/rest/helpers/qbraid/QbraidServerHelper.cpp b/runtime/cudaq/platform/default/rest/helpers/qbraid/QbraidServerHelper.cpp new file mode 100644 index 00000000000..3cd688ec579 --- /dev/null +++ b/runtime/cudaq/platform/default/rest/helpers/qbraid/QbraidServerHelper.cpp @@ -0,0 +1,307 @@ +#include "common/Logger.h" +#include "common/RestClient.h" +#include "common/ServerHelper.h" +#include "cudaq/Support/Version.h" +#include "cudaq/utils/cudaq_utils.h" +#include + +namespace cudaq { + +class QbraidServerHelper : public ServerHelper { + static constexpr const char *DEFAULT_URL = "https://api-v2.qbraid.com/api/v1"; + static constexpr const char *DEFAULT_DEVICE = "ionq:ionq:sim:simulator"; + static constexpr int DEFAULT_QUBITS = 29; + +public: + const std::string name() const override { return "qbraid"; } + + void initialize(BackendConfig config) override { + cudaq::info("Initializing Qbraid Backend."); + + backendConfig.clear(); + backendConfig["url"] = getValueOrDefault(config, "url", DEFAULT_URL); + backendConfig["user_agent"] = "cudaq/" + std::string(cudaq::getVersion()); + backendConfig["qubits"] = std::to_string(DEFAULT_QUBITS); + + // Accept "machine" as a user-friendly alias 
for device_id + // Usage: cudaq.set_target("qbraid", machine="ionq:ionq:sim:simulator") + if (!config["machine"].empty()) { + backendConfig["device_id"] = config["machine"]; + } else { + backendConfig["device_id"] = getValueOrDefault(config, "device_id", DEFAULT_DEVICE); + } + + // Accept api_key from target arguments, fall back to QBRAID_API_KEY env var + // Usage: cudaq.set_target("qbraid", api_key="my-key") + if (!config["api_key"].empty()) { + backendConfig["api_key"] = config["api_key"]; + } else { + backendConfig["api_key"] = getEnvVar("QBRAID_API_KEY", "", true); + } + backendConfig["job_path"] = backendConfig["url"] + "/jobs"; + + backendConfig["results_output_dir"] = getValueOrDefault(config, "results_output_dir", "./qbraid_results"); + backendConfig["results_file_prefix"] = getValueOrDefault(config, "results_file_prefix", "qbraid_job_"); + + if (!config["shots"].empty()) { + backendConfig["shots"] = config["shots"]; + this->setShots(std::stoul(config["shots"])); + } else { + backendConfig["shots"] = "1000"; + this->setShots(1000); + } + + parseConfigForCommonParams(config); + + cudaq::info("Qbraid configuration initialized:"); + for (const auto &[key, value] : backendConfig) { + cudaq::info(" {} = {}", key, value); + } + + std::string resultsDir = backendConfig["results_output_dir"]; + std::filesystem::create_directories(resultsDir); + cudaq::info("Created results directory: {}", resultsDir); + } + + ServerJobPayload + createJob(std::vector &circuitCodes) override { + if (backendConfig.find("job_path") == backendConfig.end()) { + throw std::runtime_error("job_path not found in config. 
Was initialize() called?"); + } + + std::vector jobs; + for (auto &circuitCode : circuitCodes) { + ServerMessage job; + job["deviceQrn"] = backendConfig.at("device_id"); + job["shots"] = std::stoi(backendConfig.at("shots")); + + // v2 API: program is a structured object with format and data + nlohmann::json program; + program["format"] = "qasm2"; + program["data"] = circuitCode.code; + job["program"] = program; + + // v2 API: name is a top-level field (not nested under tags) + if (!circuitCode.name.empty()) { + job["name"] = circuitCode.name; + } + + jobs.push_back(job); + } + + return std::make_tuple(backendConfig.at("job_path"), getHeaders(), jobs); + } + + std::string extractJobId(ServerMessage &postResponse) override { + // v2 API: jobQrn is nested under data envelope + if (postResponse.contains("data") && postResponse["data"].contains("jobQrn")) { + return postResponse["data"]["jobQrn"].get(); + } + throw std::runtime_error("ServerMessage doesn't contain 'data.jobQrn' key."); + } + + std::string constructGetJobPath(ServerMessage &postResponse) override { + // v2 API: use path parameter instead of query parameter + if (postResponse.contains("data") && postResponse["data"].contains("jobQrn")) { + return backendConfig.at("job_path") + "/" + postResponse["data"]["jobQrn"].get(); + } + throw std::runtime_error("ServerMessage doesn't contain 'data.jobQrn' key."); + } + + std::string constructGetJobPath(std::string &jobId) override { + // v2 API: /jobs/{jobQrn} + return backendConfig.at("job_path") + "/" + jobId; + } + + std::string constructGetResultsPath(const std::string &jobId) { + // v2 API: /jobs/{jobQrn}/result + return backendConfig.at("job_path") + "/" + jobId + "/result"; + } + + std::string constructGetProgramPath(const std::string &jobId) { + // v2 API: /jobs/{jobQrn}/program + return backendConfig.at("job_path") + "/" + jobId + "/program"; + } + + bool jobIsDone(ServerMessage &getJobResponse) override { + std::string status; + + // v2 API: status is 
nested under data envelope + if (getJobResponse.contains("data") && getJobResponse["data"].contains("status")) { + status = getJobResponse["data"]["status"].get(); + cudaq::info("Job status from v2 data envelope: {}", status); + } else if (getJobResponse.contains("status")) { + // Fallback: direct status field + status = getJobResponse["status"].get(); + cudaq::info("Job status from direct response: {}", status); + } else { + cudaq::info("Unexpected job response format: {}", getJobResponse.dump()); + throw std::runtime_error("Invalid job response format"); + } + + if (status == "FAILED" || status == "COMPLETED" || status == "CANCELLED") { + saveResponseToFile(getJobResponse); + return true; + } + + return false; + } + + // Fetch the original program from v2 endpoint + std::string getJobProgram(const ServerMessage &response, const std::string &jobId) override { + auto programPath = constructGetProgramPath(jobId); + auto headers = getHeaders(); + + cudaq::info("Fetching job program from v2 endpoint: {}", programPath); + RestClient client; + auto programJson = client.get("", programPath, headers, true); + + // v2 API: program content at data.data, format at data.format + if (programJson.contains("data") && programJson["data"].contains("data")) { + cudaq::info("Retrieved program (format: {})", + programJson["data"].value("format", "unknown")); + return programJson["data"]["data"].get(); + } + + throw std::runtime_error("Invalid program response format: " + programJson.dump()); + } + + // Fetch results from v2 results endpoint with retry logic + cudaq::sample_result processResults(ServerMessage &getJobResponse, std::string &jobId) override { + int maxRetries = 5; + int waitTime = 2; + float backoffFactor = 2.0; + + for (int attempt = 0; attempt < maxRetries; ++attempt) { + try { + auto resultsPath = constructGetResultsPath(jobId); + auto headers = getHeaders(); + + cudaq::info("Fetching results from v2 endpoint (attempt {}/{}): {}", attempt + 1, maxRetries, 
resultsPath); + RestClient client; + auto resultJson = client.get("", resultsPath, headers, true); + + // v2 API: error indicated by success=false + if (resultJson.contains("success") && resultJson["success"].is_boolean() + && !resultJson["success"].get()) { + std::string errorMsg = "Results not yet available"; + if (resultJson.contains("data") && resultJson["data"].contains("message")) { + errorMsg = resultJson["data"]["message"].get(); + } + cudaq::info("Results endpoint returned success=false: {}", errorMsg); + + if (attempt == maxRetries - 1) { + throw std::runtime_error("Error retrieving results: " + errorMsg); + } + } + // v2 API: measurementCounts nested under data.resultData + else if (resultJson.contains("data") + && resultJson["data"].contains("resultData") + && resultJson["data"]["resultData"].contains("measurementCounts")) { + cudaq::info("Processing results from v2 endpoint"); + CountsDictionary counts; + auto &measurements = resultJson["data"]["resultData"]["measurementCounts"]; + + for (const auto &[bitstring, count] : measurements.items()) { + counts[bitstring] = + count.is_number() + ? static_cast(count.get()) + : static_cast(count); + } + + std::vector execResults; + execResults.emplace_back(ExecutionResult{counts}); + return cudaq::sample_result(execResults); + } + + // No valid data yet and no explicit error - retry + if (attempt < maxRetries - 1) { + int sleepTime = (attempt == 0) ? waitTime : waitTime * std::pow(backoffFactor, attempt); + cudaq::info("No valid results yet, retrying in {} seconds", sleepTime); + std::this_thread::sleep_for(std::chrono::seconds(sleepTime)); + } + + } catch (const std::exception &e) { + cudaq::info("Exception when fetching results: {}", e.what()); + if (attempt < maxRetries - 1) { + int sleepTime = (attempt == 0) ? 
waitTime : waitTime * std::pow(backoffFactor, attempt); + cudaq::info("Retrying in {} seconds", sleepTime); + std::this_thread::sleep_for(std::chrono::seconds(sleepTime)); + } + } + } + + throw std::runtime_error("Failed to retrieve measurement counts after " + + std::to_string(maxRetries) + " attempts"); + } + + /// @brief Override the polling interval method + std::chrono::microseconds + nextResultPollingInterval(ServerMessage &postResponse) override { + return std::chrono::seconds(1); + } + +private: + void saveResponseToFile(const ServerMessage &response, const std::string &identifier = "") { + try { + std::string outputDir = backendConfig.at("results_output_dir"); + std::string filePrefix = backendConfig.at("results_file_prefix"); + + // Create a unique filename using timestamp if no identifier is provided + std::string filename; + if (identifier.empty()) { + auto now = std::chrono::system_clock::now(); + auto timestamp = std::chrono::duration_cast(now.time_since_epoch()).count(); + filename = outputDir + "/" + filePrefix + std::to_string(timestamp) + ".json"; + } else { + filename = outputDir + "/" + filePrefix + identifier + ".json"; + } + + std::ofstream outputFile(filename); + if (!outputFile.is_open()) { + cudaq::info("Failed to open file for writing: {}", filename); + return; + } + + outputFile << response.dump(2); + outputFile.close(); + + cudaq::info("Response saved to file: {}", filename); + } catch (const std::exception &e) { + cudaq::info("Error saving response to file: {}", e.what()); + } + } + + RestHeaders getHeaders() override { + if (backendConfig.find("api_key") == backendConfig.end()) { + throw std::runtime_error("API key not found in config. 
Was initialize() called?"); + } + + RestHeaders headers; + headers["X-API-KEY"] = backendConfig.at("api_key"); + headers["Content-Type"] = "application/json"; + headers["User-Agent"] = backendConfig.at("user_agent"); + return headers; + } + + std::string getEnvVar(const std::string &key, const std::string &defaultVal, const bool isRequired) const { + const char *env_var = std::getenv(key.c_str()); + if (env_var == nullptr) { + if (isRequired) { + throw std::runtime_error(key + " environment variable is not set."); + } + + return defaultVal; + } + return std::string(env_var); + } + + std::string getValueOrDefault(const BackendConfig &config, + const std::string &key, + const std::string &defaultValue) const { + return config.find(key) != config.end() ? config.at(key) : defaultValue; + } +}; +} // namespace cudaq + +CUDAQ_REGISTER_TYPE(cudaq::ServerHelper, cudaq::QbraidServerHelper, qbraid) diff --git a/runtime/cudaq/platform/default/rest/helpers/qbraid/qbraid.yml b/runtime/cudaq/platform/default/rest/helpers/qbraid/qbraid.yml new file mode 100644 index 00000000000..99a0f17ee7d --- /dev/null +++ b/runtime/cudaq/platform/default/rest/helpers/qbraid/qbraid.yml @@ -0,0 +1,35 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2024 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +name: qbraid +description: "CUDA-Q target for qBraid." 
+config: + # Tell DefaultQuantumPlatform what QPU subtype to use + platform-qpu: remote_rest + # Tell NVQ++ to generate glue code to set the target backend name + gen-target-backend: true + # Add the rest-qpu library to the link list + link-libs: ["-lcudaq-rest-qpu"] + # Define the lowering pipeline + platform-lowering-config: "classical-optimization-pipeline,globalize-array-values,func.func(state-prep),unitary-synthesis,canonicalize,apply-op-specialization,aggressive-early-inlining,classical-optimization-pipeline,func.func(lower-to-cfg),canonicalize,func.func(multicontrol-decomposition),decomposition{enable-patterns=SToR1,TToR1,CCZToCX,CRyToCX,CRxToCX,R1AdjToR1,RxAdjToRx,RyAdjToRy,RzAdjToRz},quake-to-cc-prep,func.func(memtoreg{quantum=0}),symbol-dce" + # Tell the rest-qpu that we are generating OpenQASM. + codegen-emission: qasm2 + # Library mode is only for simulators, physical backends must turn this off + library-mode: false + +target-arguments: + - key: machine + required: false + type: string + platform-arg: qpu + help-string: "Specify the qBraid QPU." + - key: api_key + required: false + type: string + platform-arg: api_key + help-string: "Specify the qBraid API key." diff --git a/targettests/execution/cudaq_observe-cpp17.cpp b/targettests/execution/cudaq_observe-cpp17.cpp new file mode 100644 index 00000000000..ffd05d7780f --- /dev/null +++ b/targettests/execution/cudaq_observe-cpp17.cpp @@ -0,0 +1,56 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
 * + ******************************************************************************/ + +// REQUIRES: c++17 +// clang-format off +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// 2 different IQM machines for 2 different topologies +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// clang-format on + +#include +#include + +// The example here shows a simple use case for the `cudaq::observe` +// function in computing expected values of provided spin_ops. + +struct ansatz { + auto operator()(double theta) __qpu__ { + cudaq::qvector q(2); + x(q[0]); + ry(theta, q[1]); + cx(q[1], q[0]); + } +}; + +int main() { + + // Build up your spin op algebraically + cudaq::spin_op h = 5.907 - 2.1433 * cudaq::spin_op::x(0) * cudaq::spin_op::x(1) - + 2.1433 * cudaq::spin_op::y(0) * cudaq::spin_op::y(1) + + .21829 * cudaq::spin_op::z(0) - 6.125 * cudaq::spin_op::z(1); + + // Make repeatable for shots-based emulation + cudaq::set_random_seed(13); + + // Observe takes the kernel, the spin_op, and the concrete + // parameters for the kernel + double energy = cudaq::observe(ansatz{}, h, .59); + printf("Energy is %.16lf\n", energy); + return 0; +} + +// Note: seeds 2 and 12 will push this to -2 instead of -1. All other +// seeds in 1-100 range will be -1.x. + +// CHECK: Energy is -1. 
diff --git a/targettests/qbraid/bug_qubit.cpp b/targettests/qbraid/bug_qubit.cpp new file mode 100644 index 00000000000..2179c9f4da1 --- /dev/null +++ b/targettests/qbraid/bug_qubit.cpp @@ -0,0 +1,50 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// This code is from Issue 251. + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// RUN: cudaq-quake %cpp_std %s | cudaq-opt --promote-qubit-allocation | FileCheck --check-prefixes=MLIR %s + +#include +#include + +struct simple_x { + void operator()() __qpu__ { + cudaq::qubit q; + x(q); + mz(q); + } +}; + +// MLIR-LABEL: func.func @__nvqpp__mlirgen__simple_x() +// MLIR-NOT: quake.alloca !quake.ref +// MLIR: %[[VAL_0:.*]] = quake.alloca !quake.veq<1> +// MLIR-NEXT: %[[VAL_1:.*]] = quake.extract_ref %[[VAL_0]][0] : (!quake.veq<1>) -> !quake.ref + +int main() { + auto result = cudaq::sample(simple_x{}); + +#ifndef SYNTAX_CHECK + std::cout << 
result.most_probable() << '\n'; + assert("1" == result.most_probable()); +#endif + + return 0; +} + +// CHECK: 1 diff --git a/targettests/qbraid/callable_kernel_arg.cpp b/targettests/qbraid/callable_kernel_arg.cpp new file mode 100644 index 00000000000..759469537e7 --- /dev/null +++ b/targettests/qbraid/callable_kernel_arg.cpp @@ -0,0 +1,50 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +__qpu__ void bar(cudaq::qubit &q) { x(q); } + +struct baz { + __qpu__ void operator()(cudaq::qubit &q) { x(q); } +}; + +struct foo { + template + __qpu__ void operator()(CallableKernel &&func, int size) { + cudaq::qvector q(size); + func(q[0]); + auto result = mz(q[0]); + } +}; + +int main() { + auto result = cudaq::sample(1000, foo{}, baz{}, /*qreg size*/ 1); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << 
'\n'; + assert("1" == result.most_probable()); +#endif + + return 0; +} + +// CHECK: 1 diff --git a/targettests/qbraid/cudaq_observe.cpp b/targettests/qbraid/cudaq_observe.cpp new file mode 100644 index 00000000000..d9d1c537d85 --- /dev/null +++ b/targettests/qbraid/cudaq_observe.cpp @@ -0,0 +1,57 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// REQUIRES: c++20 +// clang-format off +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target ionq --emulate %s -o %t && %t | FileCheck %s +// 2 different IQM machines for 2 different topologies +// RUN: nvq++ --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// clang-format on + +#include +#include + +// The example here shows a simple use case for the `cudaq::observe` +// function in computing expected values of provided spin_ops. 
+ +struct ansatz { + auto operator()(double theta) __qpu__ { + cudaq::qvector q(2); + x(q[0]); + ry(theta, q[1]); + x(q[1], q[0]); + } +}; + +int main() { + + // Build up your spin op algebraically + using namespace cudaq::spin; + cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) + + .21829 * z(0) - 6.125 * z(1); + + // Make repeatable for shots-based emulation + cudaq::set_random_seed(13); + + // Observe takes the kernel, the spin_op, and the concrete + // parameters for the kernel + double energy = cudaq::observe(ansatz{}, h, .59); + printf("Energy is %.16lf\n", energy); + return 0; +} + +// Note: seeds 2 and 12 will push this to -2 instead of -1. All other seeds in +// 1-100 range will be -1.x. + +// CHECK: Energy is -1. diff --git a/targettests/qbraid/if_jit.cpp b/targettests/qbraid/if_jit.cpp new file mode 100644 index 00000000000..5719dc5b770 --- /dev/null +++ b/targettests/qbraid/if_jit.cpp @@ -0,0 +1,45 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// This code is from Issue 296. 
+ +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +__qpu__ void foo(bool value) { + cudaq::qubit q; + if (value) + x(q); + + mz(q); +} + +int main() { + auto result = cudaq::sample(100, foo, true); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << '\n'; + assert("1" == result.most_probable()); +#endif + + return 0; +} + +// CHECK: 1 diff --git a/targettests/qbraid/load_value.cpp b/targettests/qbraid/load_value.cpp new file mode 100644 index 00000000000..ab5d9cec62e --- /dev/null +++ b/targettests/qbraid/load_value.cpp @@ -0,0 +1,63 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +__qpu__ void load_value(unsigned value) { + cudaq::qvector qubits(4); + for (std::size_t i = 0; i < 4; ++i) { + // Doesn't work, even with: `if (value)` + if (value & (1 << i)) + x(qubits[3 - i]); + } + + mz(qubits); +} + +int main() { + for (auto i = 0; i < 16; ++i) { + auto result = cudaq::sample(1000, load_value, i); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << '\n'; + assert(i == std::stoi(result.most_probable(), nullptr, 2)); +#endif + } + return 0; +} + +// CHECK: 0000 +// CHECK-NEXT: 0001 +// CHECK-NEXT: 0010 +// CHECK-NEXT: 0011 +// CHECK-NEXT: 0100 +// CHECK-NEXT: 0101 +// CHECK-NEXT: 0110 +// CHECK-NEXT: 0111 +// CHECK-NEXT: 1000 +// CHECK-NEXT: 1001 +// CHECK-NEXT: 1010 +// CHECK-NEXT: 1011 +// CHECK-NEXT: 1100 +// CHECK-NEXT: 1101 +// CHECK-NEXT: 1110 +// CHECK-NEXT: 1111 diff --git a/targettests/qbraid/sudoku_2x2-1.cpp b/targettests/qbraid/sudoku_2x2-1.cpp new file mode 100644 index 00000000000..cd028025a0c --- /dev/null +++ b/targettests/qbraid/sudoku_2x2-1.cpp @@ 
-0,0 +1,79 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// REQUIRES: c++20 +// clang-format off +// RUN: nvq++ --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// clang-format on + +#include +#include +#include +#include + +__qpu__ void reflect_uniform(cudaq::qvector<> &qubits) { + h(qubits); + x(qubits); + z(qubits[0], qubits[1], qubits[2], qubits[3]); + x(qubits); + h(qubits); +} + +__qpu__ void oracle(cudaq::qvector<> &cs, cudaq::qubit &target) { + x(cs[0], !cs[1], !cs[2], cs[3], target); + x(!cs[0], cs[1], cs[2], !cs[3], target); +} + +__qpu__ void grover() { + cudaq::qvector qubits(4); + cudaq::qubit ancilla; + + // Initialization + x(ancilla); + h(ancilla); + h(qubits); // uniform initialization + + // Don't work?: + for (int i = 0; i < 2; ++i) { + oracle(qubits, ancilla); + reflect_uniform(qubits); + } + + mz(qubits); +}; + +int main() { + auto result = cudaq::sample(1000, grover); + +#ifndef SYNTAX_CHECK + std::vector strings; + for (auto &&[bits, count] : result) { + strings.push_back(bits); + } + std::sort(strings.begin(), strings.end(), [&](auto &a, auto &b) { + return result.count(a) > result.count(b); + }); + std::cout << strings[0] 
<< '\n'; + std::cout << strings[1] << '\n'; + + std::unordered_set most_probable{strings[0], strings[1]}; + assert(most_probable.count("1001") == 1); + assert(most_probable.count("0110") == 1); +#endif + + return 0; +} + +// CHECK-DAG: 1001 +// CHECK-DAG: 0110 diff --git a/targettests/qbraid/sudoku_2x2-bit_names.cpp b/targettests/qbraid/sudoku_2x2-bit_names.cpp new file mode 100644 index 00000000000..ef53021b359 --- /dev/null +++ b/targettests/qbraid/sudoku_2x2-bit_names.cpp @@ -0,0 +1,103 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// REQUIRES: c++20 +// clang-format off +// RUN: nvq++ --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// clang-format on + +#include +#include +#include +#include + +__qpu__ void reflect_uniform(cudaq::qvector<> &qubits) { + h(qubits); + x(qubits); + z(qubits[0], qubits[1], qubits[2], qubits[3]); + x(qubits); + h(qubits); +} + +__qpu__ void oracle(cudaq::qvector<> &cs, cudaq::qubit &target) { + x(cs[0], !cs[1], !cs[2], cs[3], target); + x(!cs[0], cs[1], cs[2], !cs[3], target); +} + +__qpu__ void grover() { + cudaq::qvector qubits(4); + cudaq::qubit ancilla; + + // Initialization + x(ancilla); + h(ancilla); + 
h(qubits); // uniform initialization + + oracle(qubits, ancilla); + reflect_uniform(qubits); + oracle(qubits, ancilla); + reflect_uniform(qubits); + + auto groverQubits0 = mz(qubits[0]); + auto groverQubits1 = mz(qubits[1]); + auto groverQubits2 = mz(qubits[2]); + auto groverQubits3 = mz(qubits[3]); +}; + +int main() { + auto result = cudaq::sample(1000, grover); + result.dump(); + + auto& platform = cudaq::get_platform(); + if (platform.is_remote() || platform.is_emulated()) { + // Make sure that the get_marginal() results for the individual register names + // match the subset of the bits from the global register. + // Note that this will fail if you only compile this in library mode. + auto numBits = result.begin()->first.size(); + std::cout << "Checking " << numBits << " bits against global register\n"; + for (size_t b = 0; b < numBits; b++) { + auto regName = "groverQubits" + std::to_string(b); + auto valFromRegName = result.get_marginal({0}, regName); + auto valFromGlobal = result.get_marginal({b}); + if (valFromRegName.to_map() != valFromGlobal.to_map()) { + std::cout << "--- MISMATCH DETECTED in bit " << b << " ---\n"; + valFromRegName.dump(); + valFromGlobal.dump(); + // Mark test failure + assert(valFromRegName.to_map() == valFromGlobal.to_map()); + } + } + } + +#ifndef SYNTAX_CHECK + std::vector strings; + for (auto &&[bits, count] : result) { + strings.push_back(bits); + } + std::sort(strings.begin(), strings.end(), [&](auto& a, auto& b) { + return result.count(a) > result.count(b); + }); + std::cout << strings[0] << '\n'; + std::cout << strings[1] << '\n'; + + std::unordered_set most_probable{strings[0], strings[1]}; + assert(most_probable.count("1001") == 1); + assert(most_probable.count("0110") == 1); +#endif + + return 0; +} + +// CHECK-DAG: 1001 +// CHECK-DAG: 0110 diff --git a/targettests/qbraid/sudoku_2x2-reg_name.cpp b/targettests/qbraid/sudoku_2x2-reg_name.cpp new file mode 100644 index 00000000000..6200c1070f7 --- /dev/null +++ 
b/targettests/qbraid/sudoku_2x2-reg_name.cpp @@ -0,0 +1,79 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// REQUIRES: c++20 +// clang-format off +// RUN: nvq++ --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// clang-format on + +#include +#include +#include +#include + +__qpu__ void reflect_uniform(cudaq::qvector<> &qubits) { + h(qubits); + x(qubits); + z(qubits[0], qubits[1], qubits[2], qubits[3]); + x(qubits); + h(qubits); +} + +__qpu__ void oracle(cudaq::qvector<> &cs, cudaq::qubit &target) { + x(cs[0], !cs[1], !cs[2], cs[3], target); + x(!cs[0], cs[1], cs[2], !cs[3], target); +} + +__qpu__ void grover() { + cudaq::qvector qubits(4); + cudaq::qubit ancilla; + + // Initialization + x(ancilla); + h(ancilla); + h(qubits); // uniform initialization + + oracle(qubits, ancilla); + reflect_uniform(qubits); + oracle(qubits, ancilla); + reflect_uniform(qubits); + + auto groverQubits = mz(qubits); +}; + +int main() { + auto result = cudaq::sample(1000, grover); + result.dump(); + +#ifndef SYNTAX_CHECK + std::vector strings; + for (auto &&[bits, count] : result) { + strings.push_back(bits); + } + std::sort(strings.begin(), strings.end(), [&](auto& a, 
auto& b) { + return result.count(a) > result.count(b); + }); + std::cout << strings[0] << '\n'; + std::cout << strings[1] << '\n'; + + std::unordered_set most_probable{strings[0], strings[1]}; + assert(most_probable.count("1001") == 1); + assert(most_probable.count("0110") == 1); +#endif + + return 0; +} + +// CHECK-DAG: 1001 +// CHECK-DAG: 0110 diff --git a/targettests/qbraid/sudoku_2x2.cpp b/targettests/qbraid/sudoku_2x2.cpp new file mode 100644 index 00000000000..e3d4bc2c0c3 --- /dev/null +++ b/targettests/qbraid/sudoku_2x2.cpp @@ -0,0 +1,78 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +// REQUIRES: c++20 +// clang-format off +// RUN: nvq++ --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target iqm --iqm-machine Apollo --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// clang-format on + +#include +#include +#include +#include + +__qpu__ void reflect_uniform(cudaq::qvector<> &qubits) { + h(qubits); + x(qubits); + z(qubits[0], qubits[1], qubits[2], qubits[3]); + x(qubits); + h(qubits); +} + +__qpu__ void oracle(cudaq::qvector<> &cs, cudaq::qubit &target) { + x(cs[0], !cs[1], !cs[2], cs[3], target); + x(!cs[0], cs[1], cs[2], !cs[3], target); +} + +__qpu__ void grover() { + cudaq::qvector qubits(4); + cudaq::qubit ancilla; + 
+ // Initialization + x(ancilla); + h(ancilla); + h(qubits); // uniform initialization + + oracle(qubits, ancilla); + reflect_uniform(qubits); + oracle(qubits, ancilla); + reflect_uniform(qubits); + + mz(qubits); +}; + +int main() { + auto result = cudaq::sample(1000, grover); + +#ifndef SYNTAX_CHECK + std::vector strings; + for (auto &&[bits, count] : result) { + strings.push_back(bits); + } + std::sort(strings.begin(), strings.end(), [&](auto& a, auto& b) { + return result.count(a) > result.count(b); + }); + std::cout << strings[0] << '\n'; + std::cout << strings[1] << '\n'; + + std::unordered_set most_probable{strings[0], strings[1]}; + assert(most_probable.count("1001") == 1); + assert(most_probable.count("0110") == 1); +#endif + + return 0; +} + +// CHECK-DAG: 1001 +// CHECK-DAG: 0110 diff --git a/targettests/qbraid/swap_gate.cpp b/targettests/qbraid/swap_gate.cpp new file mode 100644 index 00000000000..4f37edae871 --- /dev/null +++ b/targettests/qbraid/swap_gate.cpp @@ -0,0 +1,43 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t && %t | FileCheck %s + +#include "cudaq.h" +#include + +int main() { + + auto swapKernel = []() __qpu__ { + cudaq::qvector q(2); + x(q[0]); + swap(q[0], q[1]); + + mz(q); + }; + + auto counts = cudaq::sample(swapKernel); + +#ifndef SYNTAX_CHECK + std::cout << counts.most_probable() << '\n'; + assert("01" == counts.most_probable()); +#endif + + return 0; +} + +// CHECK: 01 diff --git a/targettests/qbraid/test-int8_t.cpp b/targettests/qbraid/test-int8_t.cpp new file mode 100644 index 00000000000..7178f6c57bb --- /dev/null +++ b/targettests/qbraid/test-int8_t.cpp @@ -0,0 +1,48 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +struct variable_qreg { + __qpu__ void operator()(std::uint8_t value) { + cudaq::qvector qubits(value); + + mz(qubits); + } +}; + +int main() { + for (auto i = 1; i < 5; ++i) { + auto result = cudaq::sample(1000, variable_qreg{}, i); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << '\n'; + assert(std::string(i, '0') == result.most_probable()); +#endif + } + + return 0; +} + +// CHECK: 0 +// CHECK: 00 +// CHECK: 000 +// CHECK: 0000 diff --git a/targettests/qbraid/test-int8_t_free_func.cpp b/targettests/qbraid/test-int8_t_free_func.cpp new file mode 100644 index 00000000000..ca9db25ec6c --- /dev/null +++ b/targettests/qbraid/test-int8_t_free_func.cpp @@ -0,0 +1,46 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +__qpu__ void variable_qreg(std::uint8_t value) { + cudaq::qvector qubits(value); + + mz(qubits); +} + +int main() { + for (auto i = 1; i < 5; ++i) { + auto result = cudaq::sample(1000, variable_qreg, i); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << '\n'; + assert(std::string(i, '0') == result.most_probable()); +#endif + } + + return 0; +} + +// CHECK: 0 +// CHECK-NEXT: 00 +// CHECK-NEXT: 000 +// CHECK-NEXT: 0000 diff --git a/targettests/qbraid/variable_size_qreg.cpp b/targettests/qbraid/variable_size_qreg.cpp new file mode 100644 index 00000000000..1f6c139a085 --- /dev/null +++ b/targettests/qbraid/variable_size_qreg.cpp @@ -0,0 +1,46 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. 
* + ******************************************************************************/ + +// clang-format off +// RUN: nvq++ %cpp_std --target anyon --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target infleqtion --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target ionq --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target iqm --iqm-machine Adonis --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target oqc --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target qbraid --emulate %s -o %t && %t | FileCheck %s +// RUN: nvq++ %cpp_std --target quantinuum --emulate %s -o %t && %t | FileCheck %s +// RUN: if %braket_avail; then nvq++ %cpp_std --target braket --emulate %s -o %t && %t | FileCheck %s; fi +// RUN: nvq++ -std=c++17 --enable-mlir %s -o %t +// clang-format on + +#include +#include + +__qpu__ void variable_qreg(unsigned value) { + cudaq::qvector qubits(value); + + mz(qubits); +} + +int main() { + for (auto i = 1; i < 5; ++i) { + auto result = cudaq::sample(1000, variable_qreg, i); + +#ifndef SYNTAX_CHECK + std::cout << result.most_probable() << '\n'; + assert(std::string(i, '0') == result.most_probable()); +#endif + } + + return 0; +} + +// CHECK: 0 +// CHECK-NEXT: 00 +// CHECK-NEXT: 000 +// CHECK-NEXT: 0000 diff --git a/tpls/Stim b/tpls/Stim index 42e0b9e0991..47190f4a3af 160000 --- a/tpls/Stim +++ b/tpls/Stim @@ -1 +1 @@ -Subproject commit 42e0b9e099180e8570407c33f87b4683cac00d81 +Subproject commit 47190f4a3afb104c9f0068d0be9fea87d2894a70 diff --git a/tpls/cpr b/tpls/cpr index d202b82fbcc..871ed52d350 160000 --- a/tpls/cpr +++ b/tpls/cpr @@ -1 +1 @@ -Subproject commit d202b82fbccf897604a18e035c09e1330dffd082 +Subproject commit 871ed52d350214a034f6ef8a3b8f51c5ce1bd400 diff --git a/tpls/fmt b/tpls/fmt index fc8d07cfe54..ba50c19e827 160000 --- a/tpls/fmt +++ b/tpls/fmt @@ -1 +1 @@ -Subproject commit fc8d07cfe54ba9f5019453dfdb112491246ee017 +Subproject commit 
ba50c19e827383bd5dacb74189fb4852c8dcbdae diff --git a/tpls/spdlog b/tpls/spdlog index 287333ee005..edc51df1bda 160000 --- a/tpls/spdlog +++ b/tpls/spdlog @@ -1 +1 @@ -Subproject commit 287333ee00555aaece5a5cf6acc9040563c6f642 +Subproject commit edc51df1bdad8667b628999394a1e7c4dc6f3658 diff --git a/unittests/backends/CMakeLists.txt b/unittests/backends/CMakeLists.txt index 6edfb2efd2d..604f269256e 100644 --- a/unittests/backends/CMakeLists.txt +++ b/unittests/backends/CMakeLists.txt @@ -8,15 +8,15 @@ # List of libraries to link with by default to create a test executable set(default_backend_unittest_libs - fmt::fmt-header-only - cudaq-common + fmt::fmt-header-only + cudaq-common cudaq cudaq-builder cudaq-mlir-runtime cudaq-rest-qpu cudaq-operator nvqir nvqir-qpp - cudaq-platform-default + cudaq-platform-default gtest_main) define_property(DIRECTORY PROPERTY BACKEND_UNITTEST_LIBS INHERITED @@ -32,12 +32,12 @@ set_property(DIRECTORY PROPERTY BACKEND_UNITTEST_LIBS ${default_backend_unittest # Helper function to create an executable to be used by the gtest unit tests # - target: positional argument, name of the executable # - BACKEND: named argument to specify a prefix for the test names -# - BACKEND_CONFIG: if present, the test will set NVQPP_TARGET_BACKEND_CONFIG +# - BACKEND_CONFIG: if present, the test will set NVQPP_TARGET_BACKEND_CONFIG # with this value so the backend gets loaded by a constructor before entering main. 
# To avoid issues with semicolon the format is: backend key1=value1 key2=value2 # The function will convert this to : backend;key1;value1;key2;value2 # Example: infleqtion emulate=false url=http://localhost:62447 -# - LINK_LIBS: optional argument to provide non-default list of libraries to link with +# - LINK_LIBS: optional argument to provide non-default list of libraries to link with function(add_backend_unittest_executable target) set(singleValues BACKEND BACKEND_CONFIG) set(multiValues SOURCES INCLUDES LINK_LIBS) @@ -97,6 +97,9 @@ if (OPENSSL_FOUND AND CUDAQ_ENABLE_PYTHON AND CUDAQ_TEST_MOCK_SERVERS) if (CUDAQ_ENABLE_SCALEWAY_BACKEND) add_subdirectory(scaleway) endif() + if (CUDAQ_ENABLE_QBRAID_BACKEND) + add_subdirectory(qbraid) + endif() add_subdirectory(extra_payload_provider) endif() add_subdirectory(pasqal) diff --git a/unittests/backends/qbraid/CMakeLists.txt b/unittests/backends/qbraid/CMakeLists.txt new file mode 100644 index 00000000000..05ca3c19550 --- /dev/null +++ b/unittests/backends/qbraid/CMakeLists.txt @@ -0,0 +1,27 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +add_executable(test_qbraid QbraidTester.cpp) +if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT APPLE) + target_link_options(test_qbraid PRIVATE -Wl,--no-as-needed) +endif() +target_compile_definitions(test_qbraid PRIVATE -DNVQIR_BACKEND_NAME=qbraid) +target_include_directories(test_qbraid PRIVATE ../..) 
+target_link_libraries(test_qbraid + PRIVATE fmt::fmt-header-only + cudaq-common + cudaq + cudaq-builder + cudaq-mlir-runtime + cudaq-rest-qpu + cudaq-platform-default + gtest_main) + + +configure_file("QbraidStartServerAndTest.sh.in" "${CMAKE_BINARY_DIR}/unittests/backends/qbraid/QbraidStartServerAndTest.sh" @ONLY) +add_test(NAME qbraid-tests COMMAND bash QbraidStartServerAndTest.sh WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/unittests/backends/qbraid/) diff --git a/unittests/backends/qbraid/QbraidStartServerAndTest.sh.in b/unittests/backends/qbraid/QbraidStartServerAndTest.sh.in new file mode 100644 index 00000000000..72ec44e9433 --- /dev/null +++ b/unittests/backends/qbraid/QbraidStartServerAndTest.sh.in @@ -0,0 +1,43 @@ +#!/bin/bash + +# ============================================================================ # +# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +checkServerConnection() { + PYTHONPATH=@CMAKE_BINARY_DIR@/python @Python_EXECUTABLE@ - << EOF +import socket +try: + s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s.connect(("localhost", 62452)) + s.close() +except Exception: + exit(1) +EOF +} + +# Launch the fake server +PYTHONPATH=@CMAKE_BINARY_DIR@/python @Python_EXECUTABLE@ @CMAKE_SOURCE_DIR@/utils/mock_qpu/qbraid/__init__.py & +# we'll need the process id to kill it +pid=$(echo "$!") +n=0 +while ! checkServerConnection; do + sleep 1 + n=$((n+1)) + if [ "$n" -eq "10" ]; then + kill -INT $pid + exit 99 + fi +done +# Run the tests +./test_qbraid +# Did they fail? +testsPassed=$? 
+# kill the server +kill -INT $pid +# return success / failure +exit $testsPassed diff --git a/unittests/backends/qbraid/QbraidTester.cpp b/unittests/backends/qbraid/QbraidTester.cpp new file mode 100644 index 00000000000..e3e94fb8087 --- /dev/null +++ b/unittests/backends/qbraid/QbraidTester.cpp @@ -0,0 +1,177 @@ +/******************************************************************************* + * Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. * + * All rights reserved. * + * * + * This source code and the accompanying materials are made available under * + * the terms of the Apache License 2.0 which accompanies this distribution. * + ******************************************************************************/ + +#include "CUDAQTestUtils.h" +#include "common/FmtCore.h" +#include "cudaq/algorithm.h" +#include +#include +#include + +// Backend string in the QBraid target format, pointed at the local mock server +std::string mockPort = "62452"; +std::string backendStringTemplate = + "qbraid;emulate;false;url;http://localhost:{}"; + +bool isValidExpVal(double value) { + // give us some wiggle room while keeping the tests fast + return value < -1.1 && value > -2.3; +} + +CUDAQ_TEST(QbraidTester, checkSampleSync) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto kernel = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.h(qubit[0]); + kernel.mz(qubit[0]); + + auto counts = cudaq::sample(kernel); + counts.dump(); + EXPECT_EQ(counts.size(), 2); +} + +CUDAQ_TEST(QbraidTester, checkSampleAsync) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto kernel = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.h(qubit[0]); + kernel.mz(qubit[0]); + + auto future = cudaq::sample_async(kernel); + auto
counts = future.get(); + EXPECT_EQ(counts.size(), 2); +} + +CUDAQ_TEST(QbraidTester, checkSampleAsyncLoadFromFile) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto kernel = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.h(qubit[0]); + kernel.mz(qubit[0]); + + auto future = cudaq::sample_async(kernel); + { + std::ofstream out("saveMe.json"); + out << future; + } + + cudaq::async_result readIn; + std::ifstream in("saveMe.json"); + in >> readIn; + + auto counts = readIn.get(); + EXPECT_EQ(counts.size(), 2); + + std::remove("saveMe.json"); +} + +CUDAQ_TEST(QbraidTester, checkObserveSync) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto [kernel, theta] = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.x(qubit[0]); + kernel.ry(theta, qubit[1]); + kernel.x(qubit[1], qubit[0]); + + using namespace cudaq::spin; + cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) + + .21829 * z(0) - 6.125 * z(1); + auto result = cudaq::observe(kernel, h, .59); + result.dump(); + + printf("ENERGY: %lf\n", result.expectation()); + EXPECT_TRUE(isValidExpVal(result.expectation())); +} + +CUDAQ_TEST(QbraidTester, checkObserveAsync) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto [kernel, theta] = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.x(qubit[0]); + kernel.ry(theta, qubit[1]); + kernel.x(qubit[1], qubit[0]); + + using namespace cudaq::spin; + cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) + + .21829 * z(0) - 6.125 * z(1); + auto future = cudaq::observe_async(kernel, h, .59); + + auto result = 
future.get(); + result.dump(); + + printf("ENERGY: %lf\n", result.expectation()); + EXPECT_TRUE(isValidExpVal(result.expectation())); +} + +CUDAQ_TEST(QbraidTester, checkObserveAsyncLoadFromFile) { + auto backendString = + fmt::format(fmt::runtime(backendStringTemplate), mockPort); + + auto &platform = cudaq::get_platform(); + platform.setTargetBackend(backendString); + + auto [kernel, theta] = cudaq::make_kernel(); + auto qubit = kernel.qalloc(2); + kernel.x(qubit[0]); + kernel.ry(theta, qubit[1]); + kernel.x(qubit[1], qubit[0]); + + using namespace cudaq::spin; + cudaq::spin_op h = 5.907 - 2.1433 * x(0) * x(1) - 2.1433 * y(0) * y(1) + + .21829 * z(0) - 6.125 * z(1); + auto future = cudaq::observe_async(kernel, h, .59); + + { + std::ofstream out("saveMeObserve.json"); + out << future; + } + + cudaq::async_result readIn(&h); + std::ifstream in("saveMeObserve.json"); + in >> readIn; + + auto result = readIn.get(); + + std::remove("saveMeObserve.json"); + result.dump(); + + printf("ENERGY: %lf\n", result.expectation()); + EXPECT_TRUE(isValidExpVal(result.expectation())); +} + +int main(int argc, char **argv) { + setenv("QBRAID_API_KEY", "00000000000000000000000000000000", 0); + ::testing::InitGoogleTest(&argc, argv); + auto ret = RUN_ALL_TESTS(); + return ret; +} \ No newline at end of file diff --git a/utils/mock_qpu/__init__.py b/utils/mock_qpu/__init__.py index 8167902c1e1..c508a32c796 100644 --- a/utils/mock_qpu/__init__.py +++ b/utils/mock_qpu/__init__.py @@ -21,6 +21,7 @@ "qci": 62449, "scaleway": 62450, "tii": 62451, + "qbraid": 62452, } diff --git a/utils/mock_qpu/qbraid/__init__.py b/utils/mock_qpu/qbraid/__init__.py new file mode 100644 index 00000000000..1bb225a59f7 --- /dev/null +++ b/utils/mock_qpu/qbraid/__init__.py @@ -0,0 +1,276 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2025 NVIDIA Corporation & Affiliates. # +# All rights reserved. 
# +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +import itertools +import random +import re +import uuid +from typing import Any, Optional + +import uvicorn +from fastapi import FastAPI, Header, HTTPException, Path +from pydantic import BaseModel + +app = FastAPI() + + +class Program(BaseModel): + """Structured program payload for v2 API.""" + + format: str + data: str + + +class Job(BaseModel): + """Data required to submit a quantum job (v2 API).""" + + program: Program + shots: int + deviceQrn: str + name: Optional[str] = None + tags: Optional[dict] = None + + +JOBS_MOCK_DB = {} +JOBS_MOCK_RESULTS = {} + + +def count_qubits(qasm: str) -> int: + """Extracts the number of qubits from an OpenQASM string.""" + pattern = r"qreg\s+\w+\[(\d+)\];" + + match = re.search(pattern, qasm) + + if match: + return int(match.group(1)) + + raise ValueError("No qreg declaration found in the OpenQASM string.") + + +def simulate_job(qasm: str, num_shots: int) -> dict[str, int]: + """Simulates a quantum job by generating random measurement outcomes based on the circuit.""" + num_qubits = count_qubits(qasm) + + measured_qubits = [] + + measure_pattern = r"measure\s+(\w+)\[(\d+)\]" + measure_matches = re.findall(measure_pattern, qasm) + + hadamard_pattern = r"h\s+(\w+)\[(\d+)\]" + hadamard_matches = re.findall(hadamard_pattern, qasm) + + superposition_qubits = set() + for _, qubit_idx in hadamard_matches: + superposition_qubits.add(int(qubit_idx)) + + for _, qubit_idx in measure_matches: + measured_qubits.append(int(qubit_idx)) + + if not measured_qubits: + measured_qubits = list(range(num_qubits)) + + result = {} + + possible_states = [] + + if measured_qubits: + # Generate strings of the appropriate length for measured qubits + # For superposition qubits, include both 0 and 1 outcomes + 
for measured_qubit in measured_qubits: + if measured_qubit in superposition_qubits: + if not possible_states: + possible_states = ["0", "1"] + else: + new_states = [] + for state in possible_states: + new_states.append(state + "0") + new_states.append(state + "1") + possible_states = new_states + else: + if not possible_states: + possible_states = ["0"] + else: + possible_states = [state + "0" for state in possible_states] + + if not possible_states: + if superposition_qubits: + possible_states = ["0", "1"] + else: + possible_states = ["0" * num_qubits] + + distribution = random.choices(possible_states, k=num_shots) + result = {state: distribution.count(state) for state in set(distribution)} + + if ( + num_qubits == 2 + and len(measured_qubits) == 1 + and measured_qubits[0] == 0 + and 0 in superposition_qubits + ): + new_result = {} + total_shots = num_shots + half_shots = total_shots // 2 + + new_result["00"] = random.randint( + half_shots - half_shots // 4, half_shots + half_shots // 4 + ) + new_result["01"] = 0 + new_result["10"] = random.randint( + half_shots - half_shots // 4, half_shots + half_shots // 4 + ) + new_result["11"] = 0 + + remaining = total_shots - (new_result["00"] + new_result["10"]) + if remaining > 0: + new_result["00"] += remaining + + result = {k: v for k, v in new_result.items() if v > 0} + + return result + + +def poll_job_status(job_id: str) -> dict[str, Any]: + """Updates the status of a job and returns the updated job data.""" + if job_id not in JOBS_MOCK_DB: + raise HTTPException(status_code=404, detail="Job not found") + + status = JOBS_MOCK_DB[job_id]["status"] + + status_transitions = { + "INITIALIZING": "QUEUED", + "QUEUED": "RUNNING", + "RUNNING": "COMPLETED", + "CANCELLING": "CANCELLED", + } + + new_status = status_transitions.get(status, status) + JOBS_MOCK_DB[job_id]["status"] = new_status + + return {"jobQrn": job_id, **JOBS_MOCK_DB[job_id]} + + +# v2 API: POST /jobs +@app.post("/jobs") +async def postJob(job: Job, x_api_key: 
Optional[str] = Header(None, alias="X-API-KEY")): + """Submit a quantum job for execution (v2 API).""" + if x_api_key is None: + raise HTTPException(status_code=401, detail="API key is required") + + newId = str(uuid.uuid4()) + + # Simulate the QASM program in the structured payload to produce mock counts + counts = simulate_job(job.program.data, job.shots) + + job_data = {"status": "INITIALIZING", "statusText": "", **job.model_dump()} + + JOBS_MOCK_DB[newId] = job_data + JOBS_MOCK_RESULTS[newId] = counts + + # v2 response: wrapped in success/data envelope + return {"success": True, "data": {"jobQrn": newId, "status": "INITIALIZING"}} + + +# v2 API: GET /jobs/{job_qrn} +@app.get("/jobs/{job_id}") +async def getJob( + job_id: str = Path(...), + x_api_key: Optional[str] = Header(None, alias="X-API-KEY"), +): + """Retrieve the status of a quantum job (v2 API).""" + if x_api_key is None: + raise HTTPException(status_code=401, detail="API key is required") + + job_data = poll_job_status(job_id) + + # v2 response: wrapped in success/data envelope + return {"success": True, "data": job_data} + + +# v2 API: GET /jobs/{job_qrn}/program +@app.get("/jobs/{job_id}/program") +async def getJobProgram( + job_id: str = Path(...), + x_api_key: Optional[str] = Header(None, alias="X-API-KEY"), +): + """Retrieve the program of a quantum job (v2 API).""" + if x_api_key is None: + raise HTTPException(status_code=401, detail="API key is required") + + if job_id not in JOBS_MOCK_DB: + raise HTTPException(status_code=404, detail="Job not found") + + job_data = JOBS_MOCK_DB[job_id] + + # Return the stored program in v2 format: { success, data: { format, data } } + return { + "success": True, + "data": { + "format": job_data.get("program", {}).get("format", "qasm2"), + "data": job_data.get("program", {}).get("data", ""), + }, + } + + +# v2 API: GET /jobs/{job_qrn}/result +@app.get("/jobs/{job_id}/result") +async def getJobResult( + job_id: str = Path(...), + x_api_key: Optional[str] = Header(None, alias="X-API-KEY"),
+): + """Retrieve the results of a quantum job (v2 API).""" + if x_api_key is None: + raise HTTPException(status_code=401, detail="API key is required") + + if job_id not in JOBS_MOCK_DB: + raise HTTPException(status_code=404, detail="Job not found") + + if JOBS_MOCK_DB[job_id]["status"] in {"FAILED", "CANCELLED"}: + raise HTTPException( + status_code=409, detail="Results unavailable. Job failed or was cancelled." + ) + + if JOBS_MOCK_DB[job_id]["status"] != "COMPLETED": + # v2: use success=false instead of "error" field + return { + "success": False, + "data": {"status": JOBS_MOCK_DB[job_id]["status"]}, + } + + if job_id not in JOBS_MOCK_RESULTS: + raise HTTPException(status_code=500, detail="Job results not found") + + if random.random() < 0.2: + return { + "success": False, + "data": { + "status": "COMPLETED", + "message": "Failed to retrieve job results. Please wait, and try again.", + }, + } + + counts = JOBS_MOCK_RESULTS[job_id] + + # v2 response: measurementCounts nested under data.resultData + return { + "success": True, + "data": { + "resultData": {"measurementCounts": counts}, + "status": "COMPLETED", + "cost": 0, + "timeStamps": {}, + }, + } + + +def startServer(port): + """Start the REST server.""" + uvicorn.run(app, port=port, host="0.0.0.0", log_level="info") + + +if __name__ == "__main__": + startServer(62452)