-
Notifications
You must be signed in to change notification settings - Fork 0
/
evaluate.py
83 lines (73 loc) · 3.39 KB
/
evaluate.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
import argparse
import json
from aptness.evaluations.aptness import APTNESSEvaluator
from aptness.evaluations.base import BaseEvaluator
from aptness.evaluations.rag import RAGEvaluator
if __name__ == "__main__":
    # Command-line entry point: load a JSON test set and run one of the
    # three response-generation evaluators (baseline / RAG / APTNESS) on it.
    parser = argparse.ArgumentParser(description="APTNESS evaluator")

    datasets = ['ed', 'extes']
    parser.add_argument("-d", "--dataset", default="ed", type=str,
                        choices=datasets,
                        help="one of: {}".format(", ".join(sorted(datasets))))
    parser.add_argument("-dd", "--data_dir", default="data", type=str,
                        help="data directory")
    parser.add_argument("-pd", "--prompts_dir", default="prompts", type=str,
                        help="prompts directory")

    strategies = ['esconv', 'extes']
    # BUG FIX: the help text here previously listed the *datasets*
    # (copy-paste error); it now lists the valid strategies.
    parser.add_argument("-s", "--strategy", default="extes", type=str,
                        choices=strategies,
                        help="one of: {}".format(", ".join(sorted(strategies))))

    methods = ['baseline', 'rag', 'aptness']
    # BUG FIX: the default used to be "data" (copied from --data_dir), which
    # is not a valid method and made a bare invocation crash with
    # NotImplementedError.  Default to the baseline evaluator instead.
    parser.add_argument("-m", "--method", default="baseline", type=str,
                        choices=methods,
                        help="Methods used for generation; one of: {}".format(
                            ", ".join(sorted(methods))))

    # Model / endpoint configuration for the three model roles
    # (generation model, strategy-selection model, judge/evaluator model).
    # The 'bar help' placeholders are replaced with real descriptions.
    parser.add_argument('--model_name', type=str,
                        help='name of the response-generation model')
    parser.add_argument('--model_api_key', type=str,
                        help='API key for the response-generation model')
    parser.add_argument('--model_api_base', type=str,
                        help='API base URL for the response-generation model')
    parser.add_argument('--strategy_name', type=str,
                        help='name of the strategy model (aptness method only)')
    parser.add_argument('--strategy_api_key', type=str,
                        help='API key for the strategy model')
    parser.add_argument('--strategy_api_base', type=str,
                        help='API base URL for the strategy model')
    parser.add_argument('--evaluator_name', type=str,
                        help='name of the evaluator (judge) model')
    parser.add_argument('--evaluator_api_key', type=str,
                        help='API key for the evaluator model')
    parser.add_argument('--evaluator_api_base', type=str,
                        help='API base URL for the evaluator model')

    args = parser.parse_args()
    print(args)

    # Test conversations live at <data_dir>/<dataset>.json.
    # Explicit encoding so behavior does not depend on the platform locale.
    with open(f"{args.data_dir}/{args.dataset}.json", encoding="utf-8") as fd:
        test_data = json.load(fd)

    # All three evaluators share these constructor arguments; only the
    # APTNESS variant additionally needs the strategy-model settings.
    common_kwargs = dict(
        model_name=args.model_name,
        model_api_key=args.model_api_key,
        model_api_base=args.model_api_base,
        evaluator_name=args.evaluator_name,
        evaluator_api_key=args.evaluator_api_key,
        evaluator_api_base=args.evaluator_api_base,
        data_dir=args.data_dir,
        prompts_dir=args.prompts_dir,
    )
    if args.method == 'baseline':
        evaluator = BaseEvaluator(**common_kwargs)
    elif args.method == 'rag':
        evaluator = RAGEvaluator(**common_kwargs)
    elif args.method == 'aptness':
        evaluator = APTNESSEvaluator(
            strategy_name=args.strategy_name,
            strategy_api_key=args.strategy_api_key,
            strategy_api_base=args.strategy_api_base,
            strategy=args.strategy,
            **common_kwargs,
        )
    else:
        # Unreachable now that --method uses choices=; kept as a safeguard
        # and made informative by including the offending value.
        raise NotImplementedError(args.method)
    evaluator.run(test_data)