diff --git a/.github/workflows/ai-code-review.yml b/.github/workflows/ai-code-review.yml index 226acd8..f7a5a92 100644 --- a/.github/workflows/ai-code-review.yml +++ b/.github/workflows/ai-code-review.yml @@ -53,7 +53,7 @@ jobs: github_repository: ${{ github.repository }} github_pull_request_number: ${{ github.event.pull_request.number }} git_commit_hash: ${{ github.event.pull_request.head.sha }} - model: "gemini-1.0-pro-latest" + model: "gemini-1.5-pro-latest" pull_request_diff: |- ${{ steps.get_diff.outputs.pull_request_diff }} pull_request_chunk_size: "3500" diff --git a/README.md b/README.md index 8fe76f8..552abcc 100644 --- a/README.md +++ b/README.md @@ -61,7 +61,7 @@ jobs: git diff "origin/${{ env.PULL_REQUEST_HEAD_REF }}" > "diff.txt" # shellcheck disable=SC2086 echo "diff=$(cat "diff.txt")" >> $GITHUB_ENV - - uses: rubensflinco/gemini-code-review-action@1.0.4 + - uses: rubensflinco/gemini-code-review-action@1.0.5 name: "Code Review by Gemini AI" id: review with: @@ -70,7 +70,7 @@ github_repository: ${{ github.repository }} github_pull_request_number: ${{ github.event.pull_request.number }} git_commit_hash: ${{ github.event.pull_request.head.sha }} - model: "gemini-1.0-pro-latest" + model: "gemini-1.5-pro-latest" pull_request_diff: |- ${{ steps.get_diff.outputs.pull_request_diff }} pull_request_chunk_size: "3500" diff --git a/action.yml b/action.yml index 4c2ca26..5ffc2a3 100644 --- a/action.yml +++ b/action.yml @@ -37,11 +37,31 @@ inputs: model: description: "GPT model" required: true - default: "gemini-1.5-pro-latest" + default: "gemini-1.5-pro-latest" extra_prompt: description: "Extra prompt for GPT" required: false default: "" + temperature: + description: "Temperature for GPT" + required: false + default: "0.7" + max_tokens: + description: "Max tokens for GPT" + required: false + default: "256" + top_p: + description: "Top p for GPT" + required: false + default: "1" + frequency_penalty: + description: "Frequency penalty for GPT" + required: false + 
default: "0.0" + presence_penalty: + description: "Presence penalty for GPT" + required: false + default: "0.0" pull_request_diff: description: "Pull request diff" required: true @@ -78,5 +98,10 @@ runs: args: - "--model=${{ inputs.model }}" - "--extra-prompt=${{ inputs.extra_prompt }}" + - "--temperature=${{ inputs.temperature }}" + - "--max-tokens=${{ inputs.max_tokens }}" + - "--top-p=${{ inputs.top_p }}" + - "--frequency-penalty=${{ inputs.frequency_penalty }}" + - "--presence-penalty=${{ inputs.presence_penalty }}" - "--diff-chunk-size=${{ inputs.pull_request_chunk_size }}" - "--diff=${{ inputs.pull_request_diff }}" diff --git a/entrypoint.py b/entrypoint.py index e0fae1f..9335720 100755 --- a/entrypoint.py +++ b/entrypoint.py @@ -36,12 +36,12 @@ def check_required_env_vars(): def get_review_prompt(extra_prompt: str = "") -> str: """Get a prompt template""" template = f""" - This is a pull request or a part of a pull request if the pull request is too large. - Please assume you review this PR as a great software engineer and a great security engineer. + This is a pull request or part of a pull request if the pull request is very large. + Suppose you review this PR as an excellent software engineer and an excellent security engineer. Can you tell me the issues with differences in a pull request and provide suggestions to improve it? - You can provide a summary of the review and comments about issues by file, if any important issues are found. - - {extra_prompt} + You can provide a review summary and issue comments per file if any major issues are found. + Always include the name of the file you are citing for the improvement or problem. + In the next messages I will be sending you the difference between the GitHub file codes, okay? """ return template @@ -49,10 +49,9 @@ def get_review_prompt(extra_prompt: str = "") -> str: def get_summarize_prompt() -> str: """Get a prompt template""" template = """ - This is a pull request of a set of reviews of a pull request. 
- Those are generated by Gemini AI. - Can you summarized them? - It would be good to focus on highlighting issues and providing suggestions to improve the pull request. + Can you summarize this for me? + It would be good to stick to highlighting pressing issues and providing code suggestions to improve the pull request. + Here's what you need to summarize: """ return template @@ -99,35 +98,47 @@ def get_review( ): """Get a review""" # Chunk the prompt - genai_model = genai.GenerativeModel(model) review_prompt = get_review_prompt(extra_prompt=extra_prompt) chunked_diff_list = chunk_string(input_string=diff, chunk_size=prompt_chunk_size) + generation_config = { + "temperature": 1, + "top_p": 0.95, + "top_k": 0, + "max_output_tokens": 8192, + } + genai_model = genai.GenerativeModel(model_name=model, generation_config=generation_config, system_instruction=extra_prompt) # Get summary by chunk chunked_reviews = [] for chunked_diff in chunked_diff_list: - prompt = str(f""" - {str(review_prompt)} - - ```{str(chunked_diff)}```""") - - response = genai_model.generate_content(prompt) - review_result = response.text + convo = genai_model.start_chat(history=[ + { + "role": "user", + "parts": [review_prompt] + }, + { + "role": "model", + "parts": ["Ok"] + }, + ]) + convo.send_message(chunked_diff) + review_result = convo.last.text + logger.debug(f"Response AI: {review_result}") chunked_reviews.append(review_result) # If the chunked reviews are only one, return it + if len(chunked_reviews) == 1: return chunked_reviews, chunked_reviews[0] - # Summarize the chunked reviews - summarize_prompt = get_summarize_prompt() - chunked_reviews_JOIN = str("\n".join(chunked_reviews)) - prompt = str(f""" - {summarize_prompt} - {extra_prompt} - - ```{str(chunked_reviews_JOIN)}```""") - - response = genai_model.generate_content(prompt) - summarized_review = response.text + if len(chunked_reviews) == 0: + summarize_prompt = "Say that you didn't find any relevant changes to comment on any file" + 
else: + summarize_prompt = get_summarize_prompt() + + chunked_reviews_join = "\n".join(chunked_reviews) + convo = genai_model.start_chat(history=[]) + convo.send_message(summarize_prompt + "\n\n" + chunked_reviews_join) + summarized_review = convo.last.text + logger.debug(f"Response AI: {summarized_review}") return chunked_reviews, summarized_review