From 5dfeea18d61418fa59a7baa9af2755a4b6022a3f Mon Sep 17 00:00:00 2001 From: Faiyaz Hasan Date: Tue, 18 Apr 2023 17:02:15 -0400 Subject: [PATCH] Fix tutorial requirements file (#1580) * Update pennylane kernel requirements * Update pennylane kernel notebook * Update requirements file for dnn_comparison --- CHANGELOG.md | 4 + .../pennylane_kernel/requirements.txt | 5 +- .../pennylane_kernel/source.ipynb | 223 +++++++++++------- .../machine_learning/dnn_comparison.ipynb | 2 + .../machine_learning/requirements.txt | 2 + 5 files changed, 145 insertions(+), 91 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b257fc7c2..d33a8db9c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [UNRELEASED] +### Docs + +- Update requirements file for the tutorials: `1_QuantumMachineLearning/pennylane_kernel/source.ipynb` and `machine_learning/dnn_comparison.ipynb`. + ### Authors - Madhur Tandon diff --git a/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/requirements.txt b/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/requirements.txt index e4c8f2a27..2d9f9b9bc 100644 --- a/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/requirements.txt +++ b/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/requirements.txt @@ -1,7 +1,8 @@ covalent matplotlib==3.5.1 +numpy==1.24.2 pennylane==0.25.1 pennylane-sf==0.20.1 -pytorch==1.21.1 scikit-learn==1.0.2 -torchvision==0.13.1 +torch==2.0.0 +torchvision==0.15.1 diff --git a/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/source.ipynb b/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/source.ipynb index bd267c2e1..9ef93cf36 100644 --- a/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/source.ipynb +++ b/doc/source/tutorials/1_QuantumMachineLearning/pennylane_kernel/source.ipynb @@ -19,6 +19,44 @@ { "cell_type": "code", "execution_count": 1, + "id": "ab369db7", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "covalent\n", + "matplotlib==3.5.1\n", + "numpy==1.24.2\n", + "pennylane==0.25.1\n", + "pennylane-sf==0.20.1\n", + "scikit-learn==1.0.2\n", + "torch==2.0.0\n", + "torchvision==0.15.1\n" + ] + } + ], + "source": [ + "with open(\"./requirements.txt\", \"r\") as file:\n", + " for line in file:\n", + " print(line.rstrip())\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "1e61d293", + "metadata": {}, + "outputs": [], + "source": [ + "# Install necessary packages\n", + "# !pip install -r ./requirements.txt\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, "id": "a75f7fac", "metadata": {}, "outputs": [], @@ -41,15 +79,7 @@ "\n", "import covalent as ct\n", "\n", - "\n", - "#from covalent.executor import DaskExecutor\n", - "#from dask.distributed import LocalCluster\n", - "\n", - "#from dask.distributed import LocalCluster\n", - "#cluster=LocalCluster()\n", - "#dask=ct.executor.DaskExecutor(scheduler_address=cluster.scheduler_address) \n", - "\n", - "np.random.seed(42)" + "np.random.seed(42)\n" ] }, { @@ -68,7 +98,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "id": "cdb32772", "metadata": {}, "outputs": [], @@ -88,7 +118,7 @@ "# definition of a hinge loss\n", "y_scaled = 2 * (y - 0.5)\n", "\n", - "X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled)" + "X_train, X_test, y_train, y_test = train_test_split(X_scaled, y_scaled)\n" ] }, { @@ 
-121,29 +151,32 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "c572c82d", "metadata": {}, "outputs": [], "source": [ - "n_qubits = len(X_train[0]) #Number of qubits needed for data encoding\n", + "n_qubits = len(X_train[0]) # Number of qubits needed for data encoding\n", "\n", - "dev_kernel = qml.device(\"default.qubit\", wires=n_qubits) #Simulator used for performing kernel-based training.\n", + "dev_kernel = qml.device(\n", + " \"default.qubit\", wires=n_qubits\n", + ") # Simulator used for performing kernel-based training.\n", "\n", "projector = np.zeros((2**n_qubits, 2**n_qubits))\n", "projector[0, 0] = 1\n", "\n", + "\n", "@ct.electron()\n", "@qml.qnode(dev_kernel)\n", "def kernel(x1, x2):\n", " AngleEmbedding(x1, wires=range(n_qubits))\n", " qml.adjoint(AngleEmbedding)(x2, wires=range(n_qubits))\n", - " return qml.expval(qml.Hermitian(projector, wires=range(n_qubits)))" + " return qml.expval(qml.Hermitian(projector, wires=range(n_qubits)))\n" ] }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 6, "id": "e0f2e1a0", "metadata": {}, "outputs": [], @@ -152,16 +185,19 @@ "def kernel_matrix(A, B):\n", " return np.array([[kernel(a, b) for b in B] for a in A])\n", "\n", + "\n", "@ct.electron()\n", - "def fit_SVC(X_train, y_train,kernel_matrix):\n", + "def fit_SVC(X_train, y_train, kernel_matrix):\n", " svm = SVC(kernel=kernel_matrix).fit(X_train, y_train)\n", " return svm\n", "\n", + "\n", "@ct.electron()\n", "def predict_SVC(svm, X_test):\n", " predictions = svm.predict(X_test)\n", " return predictions\n", "\n", + "\n", "@ct.electron()\n", "def get_accuracy_score(predictions, y_test):\n", " return accuracy_score(predictions, y_test)\n" @@ -177,7 +213,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 7, "id": "2665f5e0", "metadata": {}, "outputs": [], @@ -185,24 +221,24 @@ "@ct.lattice\n", "def kernel_workflow(X_train, y_train, X_test, y_test, kernel_matrix):\n", " svm = fit_SVC(X_train, y_train, kernel_matrix)\n", - " predictions = predict_SVC(svm,X_test)\n", + " predictions = predict_SVC(svm, X_test)\n", " acc = get_accuracy_score(predictions, y_test)\n", - " return acc" + " return acc\n" ] }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 8, "id": "4b0a1a2a", "metadata": {}, "outputs": [], "source": [ - "dispatch_id = ct.dispatch(kernel_workflow)(X_train, y_train, X_test, y_test, kernel_matrix)" + "dispatch_id = ct.dispatch(kernel_workflow)(X_train, y_train, X_test, y_test, kernel_matrix)\n" ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 9, "id": "ffb39054", "metadata": {}, "outputs": [ @@ -212,14 +248,14 @@ "1.0" ] }, - "execution_count": 7, + "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "kernel_result = ct.get_result(dispatch_id=dispatch_id, wait=True)\n", - "kernel_result.result" + "kernel_result.result\n" ] }, { @@ -258,13 +294,14 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 10, "id": "9b082a7b", "metadata": {}, "outputs": [], "source": [ "dev_var = qml.device(\"default.qubit\", wires=n_qubits)\n", "\n", + "\n", "@ct.electron()\n", "@qml.qnode(dev_var, interface=\"torch\", diff_method=\"parameter-shift\")\n", "def quantum_model(x, params):\n", @@ -274,11 +311,13 @@ " StronglyEntanglingLayers(params, wires=range(n_qubits))\n", " return qml.expval(qml.PauliZ(0))\n", "\n", + "\n", "@ct.electron()\n", "def quantum_model_plus_bias(x, params, bias):\n", "\n", " return quantum_model(x, params) 
+ bias\n", "\n", + "\n", "@ct.electron()\n", "def hinge_loss(predictions, targets):\n", "\n", @@ -286,12 +325,12 @@ " hinge_loss = all_ones - predictions * targets\n", "\n", " hinge_loss = relu(hinge_loss)\n", - " return hinge_loss" + " return hinge_loss\n" ] }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 11, "id": "46fb6b81", "metadata": {}, "outputs": [], @@ -303,10 +342,12 @@ " bias_torch = torch.tensor(0.0)\n", " return params_torch, bias_torch\n", "\n", + "\n", "@ct.electron()\n", "def get_optimizer(params_torch, bias_torch):\n", " return torch.optim.Adam([params_torch, bias_torch], lr=0.1)\n", "\n", + "\n", "@ct.electron()\n", "def get_batch_data(batch_ids):\n", " X_batch = X_train[batch_ids]\n", @@ -314,12 +355,12 @@ "\n", " X_batch_torch = torch.tensor(X_batch, requires_grad=False)\n", " y_batch_torch = torch.tensor(y_batch, requires_grad=False)\n", - " \n", + "\n", " return X_batch_torch, y_batch_torch\n", "\n", "\n", "@ct.electron()\n", - "def step_optimizer(opt, params_torch, bias_torch, X_batch_torch, y_batch_torch):\n", + "def step_optimizer(opt, params_torch, bias_torch, X_batch_torch, y_batch_torch):\n", " def closure():\n", " opt.zero_grad()\n", " preds = torch.stack(\n", @@ -334,33 +375,42 @@ " current_loss = closure().detach().numpy().item()\n", " return opt, current_loss\n", "\n", + "\n", "@ct.electron()\n", "def get_random(n, batch_size):\n", " return np.random.choice(n, batch_size)\n", "\n", + "\n", "@ct.electron()\n", "def get_torch(x):\n", " return torch.tensor(x)\n", "\n", + "\n", "@ct.electron()\n", "def get_pred(pred_torch):\n", " pred = pred_torch.detach().numpy().item()\n", " if pred > 0:\n", " pred = 1\n", " else:\n", - " pred = -1 \n", + " pred = -1\n", " return pred\n", "\n", "\n", "@ct.electron()\n", - "def run_iteration(opt,batch_size,params_torch,bias_torch):\n", + "def run_iteration(opt, batch_size, params_torch, bias_torch):\n", " batch_ids = get_random(len(X_train), batch_size)\n", "\n", " X_batch_torch, y_batch_torch = get_batch_data(batch_ids=batch_ids)\n", - " \n", - " opt, current_loss = step_optimizer(opt = opt,params_torch = params_torch, bias_torch = bias_torch, X_batch_torch = X_batch_torch, y_batch_torch = y_batch_torch)\n", - " \n", - " return opt,current_loss" + "\n", + " opt, current_loss = step_optimizer(\n", + " opt=opt,\n", + " params_torch=params_torch,\n", + " bias_torch=bias_torch,\n", + " X_batch_torch=X_batch_torch,\n", + " y_batch_torch=y_batch_torch,\n", + " )\n", + "\n", + " return opt, current_loss\n" ] }, { @@ -388,7 +438,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 12, "id": "15612d53", "metadata": {}, "outputs": [], @@ -396,26 +446,27 @@ "@ct.electron()\n", "@ct.lattice()\n", "def quantum_model_train(n_layers, steps, batch_size):\n", - " \n", + "\n", " params_torch, bias_torch = get_params(n_layers)\n", "\n", " opt = get_optimizer(params_torch, bias_torch)\n", "\n", " loss_history = []\n", - " \n", + "\n", " for i in range(steps):\n", "\n", - " opt, current_loss = run_iteration(opt=opt,batch_size=batch_size,params_torch=params_torch,bias_torch=bias_torch)\n", - " \n", + " opt, current_loss = run_iteration(\n", + " opt=opt, batch_size=batch_size, params_torch=params_torch, bias_torch=bias_torch\n", + " )\n", + "\n", " loss_history.append(current_loss)\n", - " \n", - " return params_torch, bias_torch, loss_history\n", - "\n" + "\n", + " return params_torch, bias_torch, loss_history\n" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 13, "id": 
"672fcab0", "metadata": {}, "outputs": [], @@ -429,29 +480,33 @@ "\n", " x_torch = get_torch(x=x)\n", " pred_torch = quantum_model_plus_bias(x=x_torch, params=trained_params, bias=trained_bias)\n", - " \n", + "\n", " pred = get_pred(pred_torch=pred_torch)\n", " p.append(pred)\n", - " return p" + " return p\n" ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 14, "id": "a9f3ee4b", "metadata": {}, "outputs": [], "source": [ "@ct.lattice()\n", "def variation_circuit(n_layers, steps, batch_size):\n", - " trained_params, trained_bias, loss_history = quantum_model_train(n_layers=n_layers, steps=steps, batch_size=batch_size)\n", - " pred_test = quantum_model_predict(X_pred = X_test, trained_params=trained_params, trained_bias=trained_bias)\n", - " return loss_history,trained_params, trained_bias, pred_test" + " trained_params, trained_bias, loss_history = quantum_model_train(\n", + " n_layers=n_layers, steps=steps, batch_size=batch_size\n", + " )\n", + " pred_test = quantum_model_predict(\n", + " X_pred=X_test, trained_params=trained_params, trained_bias=trained_bias\n", + " )\n", + " return loss_history, trained_params, trained_bias, pred_test\n" ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 15, "id": "c4e0b138", "metadata": {}, "outputs": [ @@ -459,7 +514,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "accuracy on test set: 0.36\n" + "accuracy on test set: 0.16\n" ] }, { @@ -482,9 +537,7 @@ "\n", "dispatch_id = ct.dispatch(variation_circuit)(n_layers, steps, batch_size)\n", "result = ct.get_result(dispatch_id=dispatch_id, wait=True)\n", - "loss_history,trained_params, trained_bias, pred_test = result.result\n", - "\n", - "#loss_history,trained_params, trained_bias, pred_test = variation_circuit(n_layers, steps, batch_size)\n", + "loss_history, trained_params, trained_bias, pred_test = result.result\n", "\n", "print(\"accuracy on test set:\", accuracy_score(pred_test, y_test))\n", "\n", @@ -492,7 +545,7 @@ "plt.ylim((0, 1))\n", "plt.xlabel(\"steps\")\n", "plt.ylabel(\"cost\")\n", - "plt.show()" + "plt.show()\n" ] }, { @@ -521,7 +574,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 16, "id": "62fdc7b6", "metadata": {}, "outputs": [ @@ -531,7 +584,7 @@ "7500" ] }, - "execution_count": 14, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -539,7 +592,7 @@ "source": [ "def circuit_evals_kernel(n_data, split):\n", " \"\"\"Compute how many circuit evaluations one needs for kernel-based\n", - " training and prediction.\"\"\"\n", + " training and prediction.\"\"\"\n", "\n", " M = int(np.ceil(split * n_data))\n", " Mpred = n_data - M\n", @@ -549,12 +602,13 @@ "\n", " return n_training + n_prediction\n", "\n", - "circuit_evals_kernel(n_data=len(X), split=len(X_train) /(len(X_train) + len(X_test)))" + "\n", + "circuit_evals_kernel(n_data=len(X), split=len(X_train) / (len(X_train) + len(X_test)))\n" ] }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 17, "id": "9c362c69", "metadata": {}, "outputs": [ @@ -564,7 +618,7 @@ "96025" ] }, - "execution_count": 15, + "execution_count": 17, "metadata": {}, "output_type": "execute_result" } @@ -572,7 +626,7 @@ "source": [ "def circuit_evals_variational(n_data, n_params, n_steps, shift_terms, split, batch_size):\n", " \"\"\"Compute how many circuit evaluations are needed for\n", - " variational training and prediction.\"\"\"\n", + " variational training and prediction.\"\"\"\n", "\n", " M = int(np.ceil(split * n_data))\n", " 
Mpred = n_data - M\n", @@ -582,19 +636,20 @@ "\n", " return n_training + n_prediction\n", "\n", + "\n", "circuit_evals_variational(\n", " n_data=len(X),\n", " n_params=len(trained_params.flatten()),\n", " n_steps=steps,\n", " shift_terms=2,\n", - " split=len(X_train) /(len(X_train) + len(X_test)),\n", + " split=len(X_train) / (len(X_train) + len(X_test)),\n", " batch_size=batch_size,\n", - ")" + ")\n" ] }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 18, "id": "15194aa1", "metadata": {}, "outputs": [ @@ -604,7 +659,7 @@ "2025" ] }, - "execution_count": 16, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -612,7 +667,7 @@ "source": [ "def model_evals_nn(n_data, n_params, n_steps, split, batch_size):\n", " \"\"\"Compute how many model evaluations are needed for neural\n", - " network training and prediction.\"\"\"\n", + " network training and prediction.\"\"\"\n", "\n", " M = int(np.ceil(split * n_data))\n", " Mpred = n_data - M\n", @@ -622,18 +677,19 @@ "\n", " return n_training + n_prediction\n", "\n", + "\n", "model_evals_nn(\n", " n_data=len(X),\n", " n_params=len(trained_params.flatten()),\n", " n_steps=steps,\n", - " split=len(X_train) /(len(X_train) + len(X_test)),\n", + " split=len(X_train) / (len(X_train) + len(X_test)),\n", " batch_size=batch_size,\n", - ")" + ")\n" ] }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 19, "id": "9a89a42c", "metadata": {}, "outputs": [ @@ -660,26 +716,23 @@ "for M in x_axis:\n", "\n", " var1 = circuit_evals_variational(\n", - " n_data=M, n_params=M, n_steps=M, shift_terms=2, split=0.75, batch_size=1\n", + " n_data=M, n_params=M, n_steps=M, shift_terms=2, split=0.75, batch_size=1\n", " )\n", " variational_training1.append(var1)\n", "\n", " var2 = circuit_evals_variational(\n", - " n_data=M, n_params=round(np.sqrt(M)), n_steps=M,\n", - " shift_terms=2, split=0.75, batch_size=1\n", + " n_data=M, n_params=round(np.sqrt(M)), n_steps=M, shift_terms=2, split=0.75, batch_size=1\n", " )\n", " variational_training2.append(var2)\n", "\n", " kernel = circuit_evals_kernel(n_data=M, split=0.75)\n", " kernelbased_training.append(kernel)\n", "\n", - " nn = model_evals_nn(\n", - " n_data=M, n_params=M, n_steps=M, split=0.75, batch_size=1\n", - " )\n", + " nn = model_evals_nn(n_data=M, n_params=M, n_steps=M, split=0.75, batch_size=1)\n", " nn_training.append(nn)\n", "\n", "\n", - "plt.plot(x_axis, nn_training, linestyle='--', label=\"neural net\")\n", + "plt.plot(x_axis, nn_training, linestyle=\"--\", label=\"neural net\")\n", "plt.plot(x_axis, variational_training1, label=\"var. circuit (linear param scaling)\")\n", "plt.plot(x_axis, variational_training2, label=\"var. circuit (srqt param scaling)\")\n", "plt.plot(x_axis, kernelbased_training, label=\"(quantum) kernel\")\n", @@ -687,7 +740,7 @@ "plt.ylabel(\"number of evaluations\")\n", "plt.legend()\n", "plt.tight_layout()\n", - "plt.show()" + "plt.show()\n" ] }, { @@ -703,14 +756,6 @@ "\n", "With the use of Covalent we can create workflows that can save intermediate results during training and also helps us monitor the training process." 
] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "89f6c85b", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { @@ -729,7 +774,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.4" + "version": "3.8.13" } }, "nbformat": 4, diff --git a/doc/source/tutorials/machine_learning/dnn_comparison.ipynb b/doc/source/tutorials/machine_learning/dnn_comparison.ipynb index ed9653284..be958fe5c 100644 --- a/doc/source/tutorials/machine_learning/dnn_comparison.ipynb +++ b/doc/source/tutorials/machine_learning/dnn_comparison.ipynb @@ -99,6 +99,8 @@ "output_type": "stream", "text": [ "covalent\n", + "covalent-aws-plugins[batch]\n", + "covalent-aws-plugins[ecs]\n", "pandas==1.4.4\n", "tensorflow==2.9.1\n" ] diff --git a/doc/source/tutorials/machine_learning/requirements.txt b/doc/source/tutorials/machine_learning/requirements.txt index 61e249871..28ab401cc 100644 --- a/doc/source/tutorials/machine_learning/requirements.txt +++ b/doc/source/tutorials/machine_learning/requirements.txt @@ -1,3 +1,5 @@ covalent +covalent-aws-plugins[batch] +covalent-aws-plugins[ecs] pandas==1.4.4 tensorflow==2.9.1
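
Note on verifying the new pins (illustrative, not part of the patch): the fix above replaces the invalid `pytorch==1.21.1` entry with `torch==2.0.0` and adds an explicit `numpy==1.24.2` pin, so a freshly built tutorial environment should resolve cleanly with `pip install -r requirements.txt`. A minimal sanity check along the lines below, with the version strings copied from the updated `pennylane_kernel/requirements.txt`, can confirm that the installed distributions match the pins; the script itself, including the `pins` dictionary and its printout, is a sketch and not part of this changeset.

    # sanity_check.py -- illustrative only; version pins copied from the
    # updated pennylane_kernel/requirements.txt in this patch.
    import importlib.metadata as md

    pins = {
        "matplotlib": "3.5.1",
        "numpy": "1.24.2",
        "pennylane": "0.25.1",
        "pennylane-sf": "0.20.1",
        "scikit-learn": "1.0.2",
        "torch": "2.0.0",
        "torchvision": "0.15.1",
    }

    for package, expected in pins.items():
        try:
            installed = md.version(package)
        except md.PackageNotFoundError:
            print(f"{package}: not installed")
            continue
        status = "ok" if installed == expected else f"expected {expected}, got {installed}"
        print(f"{package}=={installed} ({status})")

The same pattern applies to the machine_learning tutorial environment (`pandas==1.4.4`, `tensorflow==2.9.1`, plus the unpinned `covalent-aws-plugins[batch]` and `covalent-aws-plugins[ecs]` extras added there).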