- The workflow converts the main description and all comments into a Chat history + +## Problems to Solve + +1. How to test workflows? +   - Especially workflows with ``issue_comment`` trigger
+ +``` +uv run pre-commit run -a -c pre-commit-config.yaml +``` diff --git a/pre-commit-config.yaml b/pre-commit-config.yaml new file mode 100644 index 0000000..4d48ac0 --- /dev/null +++ b/pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + - repo: https://github.com/rhysd/actionlint.git + rev: v1.6.26 # Use the latest stable version of actionlint + hooks: + - id: actionlint + name: Lint GitHub Actions workflows + entry: actionlint + language: golang + types: ["yaml"] + files: ^\.github/workflows/ + args: ["-color"] # Optional: Add arguments like -color for colored output diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..e888bac --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,25 @@ +[project] +name = "chat-from-github-issue" +version = "0.1.0" +description = "Add your description here" +readme = "README.md" +authors = [ + { name = "Jonghyun Park", email = "parjong@gmail.com" } +] +requires-python = "==3.14.*" +dependencies = [ + "ollama==0.6.1", + "pygithub==2.8.1", +] + +[project.scripts] +chat-from-github-issue = "chat_from_github_issue:main" + +[build-system] +requires = ["uv_build>=0.9.3,<0.10.0"] +build-backend = "uv_build" + +[dependency-groups] +dev = [ + "pre-commit==4.3.0", +] diff --git a/src/chat_from_github_issue/__init__.py b/src/chat_from_github_issue/__init__.py new file mode 100644 index 0000000..c5d4ea1 --- /dev/null +++ b/src/chat_from_github_issue/__init__.py @@ -0,0 +1,101 @@ +# Based on https://pygithub.readthedocs.io/en/v2.8.1/introduction.html +from github import Github +from github import Auth as GithubAuth +from ollama import chat +from ollama import ChatResponse + +from os import environ +from pprint import pprint + +def chat_message_of_(comment): + # comment.body: str + # comment.author_association = OWNER + # | ... 
+ # | NONE (for github-action bot account + # when tested from Github Enterprise + # comment.reactions: dict + # ^ Can be used to make a recap on close (persistent memory) + role = 'user' if comment.author_association != 'NONE' else 'assistant' + return { 'role': role, 'content': comment.body } +# def/ END + +def make_response_for_(messages): + if 'CI' in environ: + sample = """Hello! I'm Alex, your friendly AI assistant here to help you with whatever you need! 😊""" + return sample + # if/ END + + # References + # - https://apidog.com/blog/how-to-use-ollama/ + args = {} + + args['model'] = 'qwen3:1.7b' # To run LLM onNVIDIA GeForce GTX 1650 (4GB VRAM) + args['messages'] = messages + args['options'] = { 'num_ctx': 8 * 1024 } # Q. How to adjust this number? + + response: ChatResponse = chat(**args) + + return response.message.content +# def/ END + +def main() -> None: + # NOTE Caller SHOULD set 'GITHUB_TOKEN' before (even for Github Actions) + auth = GithubAuth.Token(environ['GITHUB_TOKEN']) + g = Github(auth=auth) + + param = {} + + param['src_issue_number'] = '13' + + repository: str = 'parjong/prototype' + src_issue_number: int = 13 + + repo = g.get_repo(repository) + issue = repo.get_issue(number=src_issue_number) + + messages = [] + + # System Prompt + messages += [ { 'role': 'system', 'content': 'You are alex, a kind assistant' } ] + + # Chat History + # Q. How to treat the main description? + # + # - As system prompt? + # - As chat history? + messages += [ { 'role': 'user', 'content': issue.body } ] + messages += [ chat_message_of_(comment) for comment in issue.get_comments() ] + + print('Messages:') + pprint(messages) + + response = make_response_for_(messages) + + print('Response:') + print(response) + + if 'DST_ISSUE_NUMBER' in environ: + dst_issue_number = int(environ['DST_ISSUE_NUMBER']) + issue = repo.get_issue(number=dst_issue_number) + issue.create_comment(response) + + # NOTE Possible to remove How to remove + # + # Variable | How? 
+ # --- | --- + # repository | Use 'GITHUB_REPOSITORY' environment variable + # issue_number | Use action event context + # | (e.g. github.context.issue.number for 'issue_comment' trigger) + # + # From [1] + + # NOTE. System Design Pros/Cons + # + # - Pros: Minimal infrastructure + # - Cons: Redundant re-computation (cannot reuse the KV from existing turns) +# def/ END + +# References +# +# [1] https://docs.github.com/en/actions/reference/workflowsu-and-actions/variables +# [2] https://pygithub.readthedocs.io/en/v2.8.1/examples.html