example.py
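
# Example: using gptci to ask an LLM about (conditional) independencies
# between the variables described in data/smoking.yaml.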
import asyncio
import os

import yaml
from openai import AsyncOpenAI
from dotenv import load_dotenv

from gptci import *  # provides gpt_ci and gpt_cis

# load environment variables from .env
load_dotenv()

client = AsyncOpenAI(
    api_key=os.environ['OPENAI_API_KEY'],  # this is also the default, it can be omitted
)


async def main():
    # load data
    with open('data/smoking.yaml') as file:
        data = yaml.safe_load(file)

    # print the variables and their descriptions
    for v in data['variables']:
        print("{name}: {description}".format(**v))

    print("--------------- \n \n")

    # marginal independence query; the result is a tuple of
    # (voted answer, parsed answers, raw model outputs)
    print("asking if SMK and LC are independent \n\n")
    out = await gpt_ci(client, "SMK", "LC", None, data, n=4, tryagain=True, verbose=True)
    print("\n")
    print(f"voted answer: {out[0]} \n")
    print("parsed answers:")
    print(out[1])
    print("\n")
    print("Model output:")
    for o in out[2]:
        print(o + "\n\n")

    print("--------------- \n \n")

    # conditional independence query given LC
    print("asking if SMK and PN are independent given LC \n \n")
    out2 = await gpt_ci(client, "SMK", "PN", ["LC"], data, n=4, verbose=True)
    print("\n")
    print(f"voted answer: {out2[0]} \n")
    print("parsed answers:")
    print(out2[1])
    print("\n")
    print("Model output:")
    for o in out2[2]:
        print(o + "\n\n")

    # run several CI queries concurrently
    print("trying concurrent task async")
    resa = await gpt_cis(client, data['ci-statements'], data, n=2)
    print(resa)


asyncio.run(main())