Skip to content

Commit

Permalink
add performance data output
Browse files — Browse the repository at this point in the history
  • Loading branch information
xufang-lisa committed Dec 19, 2024
1 parent 0e0775d commit 7993a62
Show file tree
Hide file tree
Showing 2 changed files with 15 additions and 3 deletions.
Original file line number · Diff line number · Diff line change
Expand Up @@ -43,7 +43,12 @@ int main(int argc, char* argv[]) try {

// Since the streamer is set, the results will
// be printed each time a new token is generated.
pipe.generate(prompt, config, streamer);
int iter = 0;
while (iter < 10) {
pipe.generate(prompt, config, streamer);
iter++;
std::cout << "\n pipeline generate finish iter:" << iter << std::endl;
}
} catch (const std::exception& error) {
try {
std::cerr << error.what() << '\n';
Expand Down
11 changes: 9 additions & 2 deletions samples/cpp/text_generation/greedy_causal_lm.cpp
Original file line number · Diff line number · Diff line change
Expand Up @@ -14,8 +14,15 @@ int main(int argc, char* argv[]) try {
ov::genai::LLMPipeline pipe(models_path, device);
ov::genai::GenerationConfig config;
config.max_new_tokens = 100;
std::string result = pipe.generate(prompt, config);
std::cout << result << std::endl;
int iter = 0;
while (iter < 10) {
auto result = pipe.generate(prompt, config);
std::cout << result.texts << std::endl;
iter++;
std::cout << "\n pipeline generate finish iter:" << iter << std::endl;
std::cout << "generate duration:" << result.perf_metrics.get_generate_duration() * 0.001 << std::endl;
std::cout << "inference duration:" << result.perf_metrics.get_inference_duration() * 0.001 << std::endl;
}
} catch (const std::exception& error) {
try {
std::cerr << error.what() << '\n';
Expand Down

0 comments on commit 7993a62

Please sign in to comment.