
Commit

Merged refs/pull/41162/head from repository cms-sw with cms-merge-topic
cmsbuild committed Nov 29, 2023
2 parents 6b19bb3 + db19114 commit 6027c04
Showing 3 changed files with 105 additions and 0 deletions.
9 changes: 9 additions & 0 deletions PhysicsTools/PythonAnalysis/test/BuildFile.xml
@@ -129,3 +129,12 @@
<test name="run-flawfinder" command="flawfinder -h"/>
<test name="run-ipython" command="ipython -h"/>
<test name="run-pylint" command="pylint -h"/>

<bin name="testTorch" file="testTorch.cc">
<use name="pytorch"/>
</bin>

<bin name="testTorchTimeSeries" file="time_serie_prediction.cpp">
<use name="pytorch"/>
<use name="pytorch-cuda"/>
</bin>
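Each <bin> entry declares a standalone test executable that scram builds and links against the listed externals (pytorch, plus pytorch-cuda for the time-series test). As a rough illustration of the smallest binary such an entry could point at, a minimal libtorch smoke test might look like the following sketch (hypothetical, not part of this commit):

// Hypothetical minimal check that the pytorch external links and runs.
#include <torch/torch.h>
#include <iostream>

int main() {
  torch::Tensor t = torch::rand({2, 3});  // any tensor op exercises libtorch
  std::cout << t.sum().item<double>() << std::endl;
  return 0;
}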
64 changes: 64 additions & 0 deletions PhysicsTools/PythonAnalysis/test/testTorch.cc
@@ -0,0 +1,64 @@
// Based on https://github.com/Maverobot/libtorch_examples/blob/master/src/simple_optimization_example.cpp
#include <torch/torch.h>
#include <cstdlib>
#include <iostream>

constexpr double kLearningRate = 0.001;
constexpr int kMaxIterations = 100000;

void native_run(double minimal) {
  // Initial x value
  auto x = torch::randn({1, 1}, torch::requires_grad(true));

  for (int t = 0; t < kMaxIterations; t++) {
    // Expression/value to be minimized
    auto y = (x - minimal) * (x - minimal);
    if (y.item<double>() < 1e-3) {
      break;
    }
    // Calculate gradient
    y.backward();

    // Update x in place; NoGradGuard keeps the update out of the autograd graph
    torch::NoGradGuard no_grad_guard;
    x -= kLearningRate * x.grad();

    // Reset the gradient of variable x
    x.mutable_grad().reset();
  }

  std::cout << "[native] Actual minimal x value: " << minimal << ", calculated optimal x value: " << x.item<double>()
            << std::endl;
}

void optimizer_run(double minimal) {
  // Initial x value, wrapped in a vector because the optimizer takes a parameter list
  std::vector<torch::Tensor> x;
  x.push_back(torch::randn({1, 1}, torch::requires_grad(true)));
  auto opt = torch::optim::SGD(x, torch::optim::SGDOptions(kLearningRate));

  for (int t = 0; t < kMaxIterations; t++) {
    // Expression/value to be minimized
    auto y = (x[0] - minimal) * (x[0] - minimal);
    if (y.item<double>() < 1e-3) {
      break;
    }
    // Calculate gradient
    y.backward();

    // Let the optimizer update x from the accumulated gradient
    opt.step();
    // Reset the gradient of variable x
    opt.zero_grad();
  }

  std::cout << "[optimizer] Actual minimal x value: " << minimal
            << ", calculated optimal x value: " << x[0].item<double>() << std::endl;
}

// Minimize y = (x - 0.01)^2 with both the manual update and the SGD optimizer
int main(int argc, char* argv[]) {
  native_run(0.01);
  optimizer_run(0.01);
  return 0;
}
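Both functions minimize the same quadratic; the only difference is whether the gradient step is applied by hand under a NoGradGuard or delegated to torch::optim::SGD. The optimizer form generalizes directly to other optimizers. For example, a sketch with Adam substituted for SGD (an illustration, not part of the commit):

// Sketch: the same minimization with Adam in place of SGD.
#include <torch/torch.h>
#include <iostream>

int main() {
  std::vector<torch::Tensor> x{torch::randn({1, 1}, torch::requires_grad(true))};
  auto opt = torch::optim::Adam(x, torch::optim::AdamOptions(0.01));
  for (int t = 0; t < 100000; t++) {
    auto y = (x[0] - 0.01) * (x[0] - 0.01);
    if (y.item<double>() < 1e-3) {
      break;
    }
    opt.zero_grad();
    y.backward();
    opt.step();
  }
  std::cout << "Adam found x = " << x[0].item<double>() << std::endl;
  return 0;
}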
32 changes: 32 additions & 0 deletions PhysicsTools/PythonAnalysis/test/time_serie_prediction.cpp
@@ -0,0 +1,32 @@
#include <torch/torch.h>
#include <iostream>

// A linear layer plus an extra learnable bias vector
struct Net : torch::nn::Module {
  Net(int64_t N, int64_t M) : linear(register_module("linear", torch::nn::Linear(N, M))) {
    another_bias = register_parameter("b", torch::randn(M));
  }
  torch::Tensor forward(torch::Tensor input) { return linear(input) + another_bias; }
  torch::nn::Linear linear;
  torch::Tensor another_bias;
};

int main(int /*argc*/, char* /*argv*/[]) {
  Net net(4, 5);

  // Use the GPU when present, the CPU otherwise.
  torch::Device device(torch::kCPU);
  if (torch::cuda::is_available()) {
    device = torch::Device(torch::kCUDA);
    std::cout << "CUDA is available! Training on GPU." << std::endl;
  }

  net.to(device);

  for (const auto& pair : net.named_parameters()) {
    std::cout << pair.key() << ": " << pair.value() << std::endl;
  }

  // Create the input on the same device as the module's parameters.
  std::cout << net.forward(torch::ones({2, 4}, device)) << std::endl;

  return 0;
}
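Despite the "Training on GPU" message, the test only runs a forward pass; a full training step on the selected device would also need a loss and an optimizer. A sketch under the same Net definition (hypothetical, not in the commit) — note that inputs and targets must be created on the same device as the module's parameters:

// Sketch: one supervised training step for Net on the chosen device.
#include <torch/torch.h>

void train_step(Net& net, torch::Device device) {
  torch::optim::SGD opt(net.parameters(), torch::optim::SGDOptions(0.01));
  auto input = torch::ones({2, 4}, device);    // shapes are illustrative
  auto target = torch::zeros({2, 5}, device);  // must match the net's output shape
  opt.zero_grad();
  auto loss = torch::mse_loss(net.forward(input), target);
  loss.backward();
  opt.step();
}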
