From 0bfcc4abe4abf125a9d7523757880844138dc5fe Mon Sep 17 00:00:00 2001
From: Davide Valsecchi
Date: Fri, 1 Dec 2023 17:57:56 +0100
Subject: [PATCH] codeformat

---
 .../PyTorch/test/create_simple_dnn.py           | 20 +++++++++++++++++++
 PhysicsTools/PyTorch/test/testBase.h            |  1 -
 PhysicsTools/PyTorch/test/testBaseCUDA.h        |  1 -
 .../PyTorch/test/testTorchSimpleDnn.cc          | 12 +++--------
 .../PyTorch/test/testTorchSimpleDnnCUDA.cc      | 16 ++++-----------
 .../PyTorch/test/testTorchSimpleDnnCuda.cc      | 16 ++++-----------
 6 files changed, 31 insertions(+), 35 deletions(-)
 create mode 100644 PhysicsTools/PyTorch/test/create_simple_dnn.py

diff --git a/PhysicsTools/PyTorch/test/create_simple_dnn.py b/PhysicsTools/PyTorch/test/create_simple_dnn.py
new file mode 100644
index 0000000000000..868980d910102
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/create_simple_dnn.py
@@ -0,0 +1,20 @@
+import torch
+
+class MyModule(torch.nn.Module):
+    def __init__(self, N, M):
+        super(MyModule, self).__init__()
+        self.weight = torch.nn.Parameter(torch.ones(N, M))
+        self.bias = torch.nn.Parameter(torch.ones(N))
+
+    def forward(self, input):
+        return torch.sum(torch.nn.functional.elu(self.weight.mv(input) + self.bias))
+
+
+module = MyModule(10, 10)
+x = torch.ones(10)
+
+tm = torch.jit.trace(module.eval(), x)
+
+print(tm.graph)
+
+tm.save("simple_dnn.pt")
diff --git a/PhysicsTools/PyTorch/test/testBase.h b/PhysicsTools/PyTorch/test/testBase.h
index cb325db68e306..03998bf885897 100644
--- a/PhysicsTools/PyTorch/test/testBase.h
+++ b/PhysicsTools/PyTorch/test/testBase.h
@@ -21,7 +21,6 @@ class testBasePyTorch : public CppUnit::TestFixture {
   std::string cmsswPath(std::string path);
 
   virtual void test() = 0;
-
 };
 
 void testBasePyTorch::setUp() {
diff --git a/PhysicsTools/PyTorch/test/testBaseCUDA.h b/PhysicsTools/PyTorch/test/testBaseCUDA.h
index cb8d496dd1eb2..c7d643c113da4 100644
--- a/PhysicsTools/PyTorch/test/testBaseCUDA.h
+++ b/PhysicsTools/PyTorch/test/testBaseCUDA.h
@@ -34,7 +34,6 @@ class testBasePyTorchCUDA : public CppUnit::TestFixture {
   std::string cmsswPath(std::string path);
 
   virtual void test() = 0;
-
 };
 
 void testBasePyTorchCUDA::setUp() {
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc
index 28da1f2a1f73c..e28803a52dafd 100644
--- a/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc
@@ -4,7 +4,6 @@
 #include
 #include
 
-
 class testSimpleDNN : public testBasePyTorch {
   CPPUNIT_TEST_SUITE(testSimpleDNN);
   CPPUNIT_TEST(test);
@@ -24,25 +23,20 @@ void testSimpleDNN::test() {
     // Deserialize the ScriptModule from a file using torch::jit::load().
     module = torch::jit::load(model_path);
     module.to(device);
-  }
-  catch (const c10::Error& e) {
-
+  } catch (const c10::Error& e) {
     std::cerr << "error loading the model\n" << e.what() << std::endl;
   }
 
   // Create a vector of inputs.
   std::vector<torch::jit::IValue> inputs;
   inputs.push_back(torch::ones(10, device));
 
-  // Execute the model and turn its output into a tensor.
   at::Tensor output = module.forward(inputs).toTensor();
-  std::cout << "output: "<< output << '\n';
+  std::cout << "output: " << output << '\n';
   CPPUNIT_ASSERT(output.item() == 110.);
   std::cout << "ok\n";
 }
 
-
-
 // int main(int argc, const char* argv[]) {
 //   std::cout << "Running model on CPU" << std::endl;
 //   torch::Device cpu(torch::kCPU);
@@ -51,6 +45,6 @@ void testSimpleDNN::test() {
 //   std::cout << "Running model on CUDA" << std::endl;
 //   torch::Device cuda(torch::kCUDA);
 //   runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
-
+
 //   return 0;
 // }
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc
index da6bc028c90be..4378d2215e491 100644
--- a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc
@@ -4,7 +4,6 @@
 #include
 #include
 
-
 class testSimpleDNNCUDA : public testBasePyTorchCUDA {
   CPPUNIT_TEST_SUITE(testSimpleDNNCUDA);
   CPPUNIT_TEST(test);
@@ -41,34 +40,27 @@ process.add_(cms.Service('CUDAService'))
   std::cout << "Testing CUDA backend" << std::endl;
-
-
   std::string model_path = testPath_ + "/simple_dnn.pt";
-  torch::Device device(torch::kCUDA );
+  torch::Device device(torch::kCUDA);
   torch::jit::script::Module module;
   try {
     // Deserialize the ScriptModule from a file using torch::jit::load().
     module = torch::jit::load(model_path);
     module.to(device);
-  }
-  catch (const c10::Error& e) {
-
+  } catch (const c10::Error& e) {
     std::cerr << "error loading the model\n" << e.what() << std::endl;
   }
 
   // Create a vector of inputs.
   std::vector<torch::jit::IValue> inputs;
   inputs.push_back(torch::ones(10, device));
 
-  // Execute the model and turn its output into a tensor.
   at::Tensor output = module.forward(inputs).toTensor();
-  std::cout << "output: "<< output << '\n';
+  std::cout << "output: " << output << '\n';
   CPPUNIT_ASSERT(output.item() == 110.);
   std::cout << "ok\n";
 }
 
-
-
 // int main(int argc, const char* argv[]) {
 //   std::cout << "Running model on CPU" << std::endl;
 //   torch::Device cpu(torch::kCPU);
@@ -77,6 +69,6 @@ process.add_(cms.Service('CUDAService'))
 //   std::cout << "Running model on CUDA" << std::endl;
 //   torch::Device cuda(torch::kCUDA);
 //   runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
-
+
 //   return 0;
 // }
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc
index da6bc028c90be..4378d2215e491 100644
--- a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc
@@ -4,7 +4,6 @@
 #include
 #include
 
-
 class testSimpleDNNCUDA : public testBasePyTorchCUDA {
   CPPUNIT_TEST_SUITE(testSimpleDNNCUDA);
   CPPUNIT_TEST(test);
@@ -41,34 +40,27 @@ process.add_(cms.Service('CUDAService'))
   std::cout << "Testing CUDA backend" << std::endl;
-
-
   std::string model_path = testPath_ + "/simple_dnn.pt";
-  torch::Device device(torch::kCUDA );
+  torch::Device device(torch::kCUDA);
   torch::jit::script::Module module;
   try {
     // Deserialize the ScriptModule from a file using torch::jit::load().
     module = torch::jit::load(model_path);
     module.to(device);
-  }
-  catch (const c10::Error& e) {
-
+  } catch (const c10::Error& e) {
     std::cerr << "error loading the model\n" << e.what() << std::endl;
   }
 
   // Create a vector of inputs.
   std::vector<torch::jit::IValue> inputs;
   inputs.push_back(torch::ones(10, device));
 
-  // Execute the model and turn its output into a tensor.
   at::Tensor output = module.forward(inputs).toTensor();
-  std::cout << "output: "<< output << '\n';
+  std::cout << "output: " << output << '\n';
   CPPUNIT_ASSERT(output.item() == 110.);
   std::cout << "ok\n";
 }
 
-
-
 // int main(int argc, const char* argv[]) {
 //   std::cout << "Running model on CPU" << std::endl;
 //   torch::Device cpu(torch::kCPU);
@@ -77,6 +69,6 @@ process.add_(cms.Service('CUDAService'))
 //   std::cout << "Running model on CUDA" << std::endl;
 //   torch::Device cuda(torch::kCUDA);
 //   runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
-
+
 //   return 0;
 // }
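For reference, the value 110. checked by CPPUNIT_ASSERT in these tests follows directly from the model that create_simple_dnn.py traces and saves: with weight = ones(10, 10), bias = ones(10) and an all-ones input of length 10, weight.mv(input) is 10 in every entry, adding the bias gives 11, elu(11) = 11, and the sum over 10 entries is 110. A minimal sketch of the same arithmetic in plain PyTorch, independent of the saved simple_dnn.pt:

import torch

# Mirror MyModule.forward from create_simple_dnn.py with N = M = 10 and an all-ones input.
weight = torch.ones(10, 10)
bias = torch.ones(10)
x = torch.ones(10)
out = torch.sum(torch.nn.functional.elu(weight.mv(x) + bias))
print(out.item())  # 110.0, the value the C++ tests compare against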