diff --git a/PhysicsTools/PyTorch/BuildFile.xml b/PhysicsTools/PyTorch/BuildFile.xml
new file mode 100644
index 0000000000000..511f4697bbabe
--- /dev/null
+++ b/PhysicsTools/PyTorch/BuildFile.xml
@@ -0,0 +1,8 @@
+
+
+
+
+
+
+
+
diff --git a/PhysicsTools/PyTorch/test/BuildFile.xml b/PhysicsTools/PyTorch/test/BuildFile.xml
new file mode 100644
index 0000000000000..01aeb83b73738
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/BuildFile.xml
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/PhysicsTools/PyTorch/test/simple_dnn.pt b/PhysicsTools/PyTorch/test/simple_dnn.pt
new file mode 100644
index 0000000000000..d4f602ea0b07c
Binary files /dev/null and b/PhysicsTools/PyTorch/test/simple_dnn.pt differ
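Note on simple_dnn.pt: the TorchScript archive is committed as a binary, so its architecture is not visible in the diff; the tests below only rely on the fact that running it on an input of ten ones returns the scalar 110. A hypothetical way to produce an equivalent archive with the libtorch C++ API is sketched here; the module name, the arithmetic, and the output file name are illustrative stand-ins, not the actual model.

#include <torch/script.h>

int main() {
  // Hypothetical stand-in: any scripted module whose forward() maps
  // torch::ones(10) to the scalar 110 would satisfy the asserts below.
  torch::jit::Module m("simple_dnn");
  m.define(R"JIT(
def forward(self, x):
    return (x * 10.0).sum() + 10.0
)JIT");
  m.save("simple_dnn.pt");  // readable later with torch::jit::load()
  return 0;
}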
diff --git a/PhysicsTools/PyTorch/test/testBase.h b/PhysicsTools/PyTorch/test/testBase.h
new file mode 100644
index 0000000000000..cb325db68e306
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testBase.h
@@ -0,0 +1,52 @@
+/*
+ * Base class for tests.
+ *
+ */
+
+#ifndef PHYSICSTOOLS_PYTORCH_TEST_TESTBASE_H
+#define PHYSICSTOOLS_PYTORCH_TEST_TESTBASE_H
+
+#include <boost/filesystem.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <cstdlib>
+#include <filesystem>
+#include <string>
+
+class testBasePyTorch : public CppUnit::TestFixture {
+public:
+ std::string dataPath_;
+ std::string testPath_;
+
+ void setUp();
+ void tearDown();
+ std::string cmsswPath(std::string path);
+
+ virtual void test() = 0;
+
+};
+
+void testBasePyTorch::setUp() {
+ dataPath_ =
+ cmsswPath("/test/" + std::string(std::getenv("SCRAM_ARCH")) + "/" + boost::filesystem::unique_path().string());
+
+  // locate the test files shipped with this package
+ testPath_ = cmsswPath("/src/PhysicsTools/PyTorch/test");
+}
+
+void testBasePyTorch::tearDown() {
+ if (std::filesystem::exists(dataPath_)) {
+ std::filesystem::remove_all(dataPath_);
+ }
+}
+
+std::string testBasePyTorch::cmsswPath(std::string path) {
+ if (path.size() > 0 && path.substr(0, 1) != "/") {
+ path = "/" + path;
+ }
+
+ std::string base = std::string(std::getenv("CMSSW_BASE"));
+ std::string releaseBase = std::string(std::getenv("CMSSW_RELEASE_BASE"));
+
+ return (std::filesystem::exists(base.c_str()) ? base : releaseBase) + path;
+}
+
+#endif // PHYSICSTOOLS_PYTORCH_TEST_TESTBASE_H
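Concrete fixtures derive from testBasePyTorch, implement test(), and register themselves with CppUnit so that the shared test runner picks them up; the simple-DNN test added below follows exactly this pattern. A minimal sketch (the fixture name and the assertion are illustrative only):

#include "testBase.h"

// Hypothetical fixture: setUp()/tearDown() from testBasePyTorch provide a
// scratch area in dataPath_ and the package's test directory in testPath_.
class testExample : public testBasePyTorch {
  CPPUNIT_TEST_SUITE(testExample);
  CPPUNIT_TEST(test);
  CPPUNIT_TEST_SUITE_END();

public:
  void test() override { CPPUNIT_ASSERT(!testPath_.empty()); }
};

CPPUNIT_TEST_SUITE_REGISTRATION(testExample);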
diff --git a/PhysicsTools/PyTorch/test/testBaseCUDA.h b/PhysicsTools/PyTorch/test/testBaseCUDA.h
new file mode 100644
index 0000000000000..cb8d496dd1eb2
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testBaseCUDA.h
@@ -0,0 +1,65 @@
+/*
+ * Base class for tests.
+ *
+ */
+
+#ifndef PHYSICSTOOLS_PYTORCH_TEST_TESTBASECUDA_H
+#define PHYSICSTOOLS_PYTORCH_TEST_TESTBASECUDA_H
+
+#include <boost/filesystem.hpp>
+#include <cppunit/extensions/HelperMacros.h>
+#include <cstdlib>
+#include <filesystem>
+#include <string>
+
+#include "FWCore/ParameterSet/interface/ParameterSet.h"
+#include "FWCore/ParameterSet/interface/ConfigurationDescriptions.h"
+#include "FWCore/ParameterSetReader/interface/ParameterSetReader.h"
+#include "FWCore/PluginManager/interface/PluginManager.h"
+#include "FWCore/PluginManager/interface/standard.h"
+#include "FWCore/ServiceRegistry/interface/Service.h"
+#include "FWCore/ServiceRegistry/interface/ServiceRegistry.h"
+#include "FWCore/ServiceRegistry/interface/ServiceToken.h"
+#include "FWCore/Utilities/interface/Exception.h"
+#include "FWCore/Utilities/interface/ResourceInformation.h"
+#include "HeterogeneousCore/CUDAServices/interface/CUDAInterface.h"
+#include "HeterogeneousCore/CUDAUtilities/interface/requireDevices.h"
+
+class testBasePyTorchCUDA : public CppUnit::TestFixture {
+public:
+ std::string dataPath_;
+ std::string testPath_;
+
+ void setUp();
+ void tearDown();
+ std::string cmsswPath(std::string path);
+
+ virtual void test() = 0;
+
+};
+
+void testBasePyTorchCUDA::setUp() {
+ dataPath_ =
+ cmsswPath("/test/" + std::string(std::getenv("SCRAM_ARCH")) + "/" + boost::filesystem::unique_path().string());
+
+  // locate the test files shipped with this package
+ testPath_ = cmsswPath("/src/PhysicsTools/PyTorch/test");
+}
+
+void testBasePyTorchCUDA::tearDown() {
+ if (std::filesystem::exists(dataPath_)) {
+ std::filesystem::remove_all(dataPath_);
+ }
+}
+
+std::string testBasePyTorchCUDA::cmsswPath(std::string path) {
+ if (path.size() > 0 && path.substr(0, 1) != "/") {
+ path = "/" + path;
+ }
+
+ std::string base = std::string(std::getenv("CMSSW_BASE"));
+ std::string releaseBase = std::string(std::getenv("CMSSW_RELEASE_BASE"));
+
+ return (std::filesystem::exists(base.c_str()) ? base : releaseBase) + path;
+}
+
+#endif // PHYSICSTOOLS_PYTORCH_TEST_TESTBASECUDA_H
diff --git a/PhysicsTools/PyTorch/test/testRunner.cc b/PhysicsTools/PyTorch/test/testRunner.cc
new file mode 100644
index 0000000000000..1482cf9a9ce85
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testRunner.cc
@@ -0,0 +1 @@
+#include <Utilities/Testing/interface/CppUnit_testdriver.icpp>
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc
new file mode 100644
index 0000000000000..28da1f2a1f73c
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnn.cc
@@ -0,0 +1,56 @@
+#include <torch/script.h>
+#include "testBase.h"
+#include <iostream>
+#include <memory>
+#include <vector>
+
+
+class testSimpleDNN : public testBasePyTorch {
+ CPPUNIT_TEST_SUITE(testSimpleDNN);
+ CPPUNIT_TEST(test);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void test() override;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(testSimpleDNN);
+
+void testSimpleDNN::test() {
+ std::string model_path = testPath_ + "/simple_dnn.pt";
+ torch::Device device(torch::kCPU);
+ torch::jit::script::Module module;
+ try {
+ // Deserialize the ScriptModule from a file using torch::jit::load().
+ module = torch::jit::load(model_path);
+ module.to(device);
+  } catch (const c10::Error& e) {
+    std::cerr << "error loading the model\n" << e.what() << std::endl;
+  }
+ // Create a vector of inputs.
+  std::vector<torch::jit::IValue> inputs;
+ inputs.push_back(torch::ones(10, device));
+
+
+ // Execute the model and turn its output into a tensor.
+ at::Tensor output = module.forward(inputs).toTensor();
+ std::cout << "output: "<< output << '\n';
+ CPPUNIT_ASSERT(output.item() == 110.);
+ std::cout << "ok\n";
+}
+
+
+
+// int main(int argc, const char* argv[]) {
+// std::cout << "Running model on CPU" << std::endl;
+// torch::Device cpu(torch::kCPU);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cpu);
+
+// std::cout << "Running model on CUDA" << std::endl;
+// torch::Device cuda(torch::kCUDA);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
+
+// return 0;
+// }
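A possible hardening of the load step, not part of this patch: the catch block above only logs, so a failed torch::jit::load() would surface later as an uncaught exception when forward() is called on the empty module. A fixture could instead fail the test immediately, for example:

  try {
    module = torch::jit::load(model_path);
    module.to(device);
  } catch (const c10::Error& e) {
    // fail right away instead of calling forward() on an empty module
    CPPUNIT_FAIL(std::string("error loading the model: ") + e.what());
  }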
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc
new file mode 100644
index 0000000000000..da6bc028c90be
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCUDA.cc
@@ -0,0 +1,82 @@
+#include <torch/script.h>
+#include "testBaseCUDA.h"
+#include <iostream>
+#include <memory>
+#include <vector>
+
+
+class testSimpleDNNCUDA : public testBasePyTorchCUDA {
+ CPPUNIT_TEST_SUITE(testSimpleDNNCUDA);
+ CPPUNIT_TEST(test);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void test() override;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(testSimpleDNNCUDA);
+
+void testSimpleDNNCUDA::test() {
+ if (!cms::cudatest::testDevices())
+ return;
+
+  std::vector<edm::ParameterSet> psets;
+ edm::ServiceToken serviceToken = edm::ServiceRegistry::createSet(psets);
+ edm::ServiceRegistry::Operate operate(serviceToken);
+
+ // Setup the CUDA Service
+ edmplugin::PluginManager::configure(edmplugin::standard::config());
+
+ std::string const config = R"_(import FWCore.ParameterSet.Config as cms
+process = cms.Process('Test')
+process.add_(cms.Service('ResourceInformationService'))
+process.add_(cms.Service('CUDAService'))
+)_";
+  std::unique_ptr<edm::ParameterSet> params;
+ edm::makeParameterSets(config, params);
+ edm::ServiceToken tempToken(edm::ServiceRegistry::createServicesFromConfig(std::move(params)));
+ edm::ServiceRegistry::Operate operate2(tempToken);
+  edm::Service<CUDAInterface> cuda;
+ std::cout << "CUDA service enabled: " << cuda->enabled() << std::endl;
+
+ std::cout << "Testing CUDA backend" << std::endl;
+
+
+
+ std::string model_path = testPath_ + "/simple_dnn.pt";
+  torch::Device device(torch::kCUDA);
+ torch::jit::script::Module module;
+ try {
+ // Deserialize the ScriptModule from a file using torch::jit::load().
+ module = torch::jit::load(model_path);
+ module.to(device);
+  } catch (const c10::Error& e) {
+    std::cerr << "error loading the model\n" << e.what() << std::endl;
+  }
+ // Create a vector of inputs.
+  std::vector<torch::jit::IValue> inputs;
+ inputs.push_back(torch::ones(10, device));
+
+
+ // Execute the model and turn its output into a tensor.
+ at::Tensor output = module.forward(inputs).toTensor();
+ std::cout << "output: "<< output << '\n';
+ CPPUNIT_ASSERT(output.item() == 110.);
+ std::cout << "ok\n";
+}
+
+
+
+// int main(int argc, const char* argv[]) {
+// std::cout << "Running model on CPU" << std::endl;
+// torch::Device cpu(torch::kCPU);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cpu);
+
+// std::cout << "Running model on CUDA" << std::endl;
+// torch::Device cuda(torch::kCUDA);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
+
+// return 0;
+// }
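For reference, the GPU requirement in this test comes from cms::cudatest::testDevices() together with the CUDAService; libtorch also exposes its own check, which a standalone snippet could use to fall back to the CPU when no device is visible. A sketch, assuming the model gives identical results on both backends:

#include <torch/cuda.h>
#include <torch/script.h>

// Hypothetical helper: prefer CUDA when libtorch can actually see a device,
// otherwise run the same TorchScript module on the CPU.
torch::Device chooseDevice() {
  return torch::cuda::is_available() ? torch::Device(torch::kCUDA) : torch::Device(torch::kCPU);
}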
diff --git a/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc
new file mode 100644
index 0000000000000..da6bc028c90be
--- /dev/null
+++ b/PhysicsTools/PyTorch/test/testTorchSimpleDnnCuda.cc
@@ -0,0 +1,82 @@
+#include <torch/script.h>
+#include "testBaseCUDA.h"
+#include <iostream>
+#include <memory>
+#include <vector>
+
+
+class testSimpleDNNCUDA : public testBasePyTorchCUDA {
+ CPPUNIT_TEST_SUITE(testSimpleDNNCUDA);
+ CPPUNIT_TEST(test);
+ CPPUNIT_TEST_SUITE_END();
+
+public:
+ void test() override;
+};
+
+CPPUNIT_TEST_SUITE_REGISTRATION(testSimpleDNNCUDA);
+
+void testSimpleDNNCUDA::test() {
+ if (!cms::cudatest::testDevices())
+ return;
+
+  std::vector<edm::ParameterSet> psets;
+ edm::ServiceToken serviceToken = edm::ServiceRegistry::createSet(psets);
+ edm::ServiceRegistry::Operate operate(serviceToken);
+
+ // Setup the CUDA Service
+ edmplugin::PluginManager::configure(edmplugin::standard::config());
+
+ std::string const config = R"_(import FWCore.ParameterSet.Config as cms
+process = cms.Process('Test')
+process.add_(cms.Service('ResourceInformationService'))
+process.add_(cms.Service('CUDAService'))
+)_";
+  std::unique_ptr<edm::ParameterSet> params;
+ edm::makeParameterSets(config, params);
+ edm::ServiceToken tempToken(edm::ServiceRegistry::createServicesFromConfig(std::move(params)));
+ edm::ServiceRegistry::Operate operate2(tempToken);
+  edm::Service<CUDAInterface> cuda;
+ std::cout << "CUDA service enabled: " << cuda->enabled() << std::endl;
+
+ std::cout << "Testing CUDA backend" << std::endl;
+
+
+
+ std::string model_path = testPath_ + "/simple_dnn.pt";
+  torch::Device device(torch::kCUDA);
+ torch::jit::script::Module module;
+ try {
+ // Deserialize the ScriptModule from a file using torch::jit::load().
+ module = torch::jit::load(model_path);
+ module.to(device);
+  } catch (const c10::Error& e) {
+    std::cerr << "error loading the model\n" << e.what() << std::endl;
+  }
+ // Create a vector of inputs.
+  std::vector<torch::jit::IValue> inputs;
+ inputs.push_back(torch::ones(10, device));
+
+
+ // Execute the model and turn its output into a tensor.
+ at::Tensor output = module.forward(inputs).toTensor();
+ std::cout << "output: "<< output << '\n';
+ CPPUNIT_ASSERT(output.item() == 110.);
+ std::cout << "ok\n";
+}
+
+
+
+// int main(int argc, const char* argv[]) {
+// std::cout << "Running model on CPU" << std::endl;
+// torch::Device cpu(torch::kCPU);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cpu);
+
+// std::cout << "Running model on CUDA" << std::endl;
+// torch::Device cuda(torch::kCUDA);
+// runModel("/data/user/dvalsecc/simple_dnn.pt", cuda);
+
+// return 0;
+// }