ai_tools.py
from typing import List, Optional, Union

from pydantic import BaseModel, Field

from agents.coder import WritePRParams
from agents.contributor import AmendPRParams
from ai import llm
from tools.github import GithubFile
from tools.jira import Issue
from utils.state import Conversation


def summarize_test_failure(
    pr: Union[WritePRParams, AmendPRParams],
    failure_msg: str,
    repo_files: List[Optional[GithubFile]],
) -> str:
    """Ask the LLM to explain a test failure and propose fixes for the PR's tests."""
    codebase = "\n".join(str(f) for f in repo_files)

    PROMPT = f"""I wrote the following Pull Request with unit tests, but the tests failed.
Help me understand the error and fix the tests.

Codebase:
{codebase}

Pull request:
{pr}

Test failure message:
```
{failure_msg}
```

In one sentence, explain the error.
Then, tell me how to fix the tests. Provide code changes. Tests must use mocked data.

Error: ...
Fix: ..."""

    summary = llm.stream_next(
        [
            {
                "role": "user",
                "content": PROMPT,
            }
        ]
    )
    assert isinstance(summary, str)
    return summary


def there_is_followup(text: str) -> bool:
    """Ask the LLM whether the text explicitly mentions a follow-up action."""
    PROMPT = f"""Does the following text explicitly say that there is a followup action?

{text}

yes/no"""

    yes_no = llm.stream_next(
        [
            {
                "role": "user",
                "content": PROMPT,
            }
        ]
    )
    assert isinstance(yes_no, str)
    return "yes" in yes_no.lower()


class SummaryParams(BaseModel):
    project_description: str = Field(description="Brief description of the project.")
    architecture_overview: str = Field(
        description="Overview of the system architecture."
    )


def summarize_architecture(conversation: Conversation) -> SummaryParams:
    """Ask the LLM to summarize the project and its architecture via a tool call."""
    TOOLS = [
        {
            "type": "function",
            "function": {
                "name": "summarize",
                "description": "Summarizes the project.",
                "parameters": SummaryParams.model_json_schema(),
            },
        },
    ]

    PROMPT = """Provide a brief description of the project and an overview of the system architecture."""

    summary = llm.stream_next(
        conversation
        + [
            {
                "role": "system",
                "content": PROMPT,
            }
        ],
        tools=TOOLS,
    )
    assert isinstance(summary, llm.RawTool)
    return SummaryParams.model_validate(summary.arguments)


def suggest_code(ticket: Issue, repo_files: List[Optional[GithubFile]]) -> str:
    """Ask the LLM to suggest, in prose, what code to write for a Jira ticket."""
    codebase = "\n".join(str(f) for f in repo_files)

    PROMPT = f"""I have to finish the following Jira ticket:

{ticket.title} - {ticket.description}

The code will be added to this codebase:

{codebase}

In 1 or 2 sentences, suggest what code I should write. Provide no code examples."""

    suggestion = llm.stream_next(
        [
            {
                "role": "user",
                "content": PROMPT,
            }
        ]
    )
    assert isinstance(suggestion, str)
    return suggestion
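
# Hypothetical usage sketch (not part of this module): shows how a caller might chain
# suggest_code and there_is_followup. The helpers fetch_repo_files / fetch_issue and the
# ticket key below are illustrative assumptions, not functions or data from this project.
#
#   files = fetch_repo_files(repo)                 # assumed to return List[Optional[GithubFile]]
#   ticket = fetch_issue("PROJ-123")               # assumed to return a tools.jira.Issue
#   suggestion = suggest_code(ticket, files)       # one- or two-sentence prose suggestion
#   if there_is_followup(suggestion):
#       ...                                        # e.g. queue a follow-up task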