transfer from monorepo
47 .gitignore vendored Normal file
@@ -0,0 +1,47 @@
.venv
.env
__pycache__
.pytest_cache
*.db
test
test_state.json
task_flow.egg-info
example_repo
signature.js
git-filter-repo
task/orca/
**/dist/
# yarn.lock
package-lock.json
node_modules
build
migrate.sh
*/dev.js
executables/*
namespace/*
config/*
.env.local
taskStateInfoKeypair.json
localKOIIDB.db
metadata.json
.npmrc
*.pem
.vscode
.cursor
data/chunks
data/process
test_state.csv
todos-example.csv


# Ignore auto-generated repository directories
repos/


# Ignore Data
data/*


venv

**/venv/
23 .prettierrc Normal file
@@ -0,0 +1,23 @@
{
  "useTabs": false,
  "tabWidth": 2,
  "singleQuote": false,
  "trailingComma": "all",
  "printWidth": 120,
  "arrowParens": "always",
  "semi": true,
  "overrides": [
    {
      "files": ["*.py"],
      "options": {
        "tabWidth": 4
      }
    },
    {
      "files": ".eslintrc",
      "options": {
        "parser": "json"
      }
    }
  ]
}
0 planner/placeholder.txt Normal file
9 worker/.env.developer.example Normal file
@@ -0,0 +1,9 @@
# This file is for prod-debug.js

TASK_ID='FGzVTXn6iZFhFo9FgWW6zoHfDkJepQkKKKPfMvDdvePv' # Easy Testing Task ID
TEST_KEYWORDS='TEST,EZ TESTING'

# Set this to use your desktop node staking wallet during testing so IPFS will work
# See https://github.com/koii-network/ezsandbox/blob/main/Lesson%201/PartIV.md#staking-wallet
STAKING_WALLET_PATH="path to your desktop node staking wallet"
MIDDLE_SERVER_URL="http://localhost:3000"
1 worker/.env.example Normal file
@@ -0,0 +1 @@
DEFAULT_BOUNTY_MARKDOWN_FILE=https://raw.githubusercontent.com/HermanL02/prometheus-swarm-bounties/master/README.md # Testing only
17 worker/.gitignore vendored Normal file
@@ -0,0 +1,17 @@
dist
build
node_modules
package-lock.json
yarn.lock
migrate.sh
*/dev.js
data/*
executables/*
namespace/*
config/*
.env.local
.env
taskStateInfoKeypair.json
localKOIIDB.db
metadata.json
.npmrc
26 worker/README.md Normal file
@@ -0,0 +1,26 @@
# Earn Crypto with AI Agents: Prometheus Document & Summarize Task (Beta v0)

## Overview

The **Prometheus Document & Summarize Task** spins up an **AI agent** capable of continuously summarizing repositories, **earning you KOII**. Automated document summarization agents can constantly process and summarize information, increasing the value of the network _and_ your node. Our ultimate goal is to have **AI agents summarizing Koii tasks**, growing the network with **more opportunities for node operators to earn rewards**.

## Releases

### Beta v0

- This is the **first beta release** of the task.
- The AI agent reads documents and generates summaries automatically.
- Documentation is sent to the user repository.
- Future versions will introduce **enhanced AI logic, more complex summarization tasks, and more!**

## Task Setup

**[How to set up a Claude API key and a GitHub API key for the 247 Document & Summarize Task.](https://www.koii.network/blog/Earn-Crypto-With-AI-Agent)**

## How It Works

1. The Koii Node **launches an AI agent** inside a lightweight runtime.
2. The agent reads an active **repository list** from the bounty repository.
3. It picks a **repository**, generates the necessary **documentation**, and submits a **GitHub pull request** (a request to have its documentation added to the repository).
4. The agent creates a new submission to the repository each round (approximately every hour).
5. Koii Nodes **earn rewards** for running the AI agent and contributing documentation.
1 worker/babel.config.cjs Normal file
@@ -0,0 +1 @@
module.exports = { presets: ["@babel/preset-env", "@babel/preset-typescript"] };
130 worker/config-task-test.yml Normal file
@@ -0,0 +1,130 @@
######################## ALL FIELDS ARE REQUIRED UNLESS OTHERWISE NOTED #########################

######################################### TASK METADATA #########################################
############################ Will be displayed in the desktop node ##############################

## Task Name ##
# Maximum 24 characters.
task_name: "Prometheus Docs Agent"

## Task Author ##
author: "Prometheus"

## Task Description Markdown ##
# If you specify a markdown file, the description field will be ignored.
# Markdown is recommended for better formatting.
markdownDescriptionPath: "./README.md"

## Task Description ##
# Ignored if you specify a markdown file.
description: "Task description."

## Repository URL ##
# Must be public for whitelisted tasks.
repositoryUrl: "https://github.com/koii-network/builder-247"

## Image URL ##
# 230x86 pixels.
imageUrl: "https://koii-k2-task-metadata.s3.us-east-2.amazonaws.com/Docs.png"

## Info URL ##
infoUrl: "https://www.koii.network/blog/Earn-Crypto-With-AI-Agent"

####################################### TASK CONFIGURATION ######################################

## Task Executable Network ##
# IPFS or DEVELOPMENT
# Keep this as IPFS unless you know you need to change it.
task_executable_network: "IPFS"

## Task Audit Program ##
# Task Executable Network IPFS: Path to your executable.
# Task Executable Network DEVELOPMENT: The value should be 'main'.
# Keep this as-is unless you know you need to change it.
task_audit_program: "dist/main.js"

## Round Time ##
# Duration of task, measured in slots (with each slot approximately equal to 408ms). Should be at least 800 slots.
# See https://www.koii.network/docs/concepts/what-are-tasks/what-are-tasks/gradual-consensus for more information on how round time, audit window, and submission window work.
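# With ~408 ms per slot, 1500 slots ≈ 612 s, so each round lasts roughly 10 minutes.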
round_time: 1500

## Audit Window ##
# The audit window should be at least 1/3 of the round time.
audit_window: 600

## Submission Window ##
# The submission window should be at least 1/3 of the round time.
submission_window: 600

## Minimum Stake Amount ##
# The minimum amount of KOII or KPL that a user must stake in order to participate in the task.
minimum_stake_amount: 0.01

## Task Bounty Type ##
# KOII or KPL
task_type: "KOII"

## Token Mint Address (ONLY for KPL tasks) ##
# The Fire Token address is provided as an example.
token_type: "4qayyw53kWz6GzypcejjT1cvwMXS1qYLSMQRE8se3gTv"

## Total Bounty Amount ##
# The total bounty amount that will be available for distribution over all rounds.
# Does nothing when updating a task.
total_bounty_amount: 11

## Bounty Amount per Round ##
# The maximum amount that can be distributed per round.
# If the actual distribution per round exceeds this amount, the distribution list will fail.
bounty_amount_per_round: 1

## Allowed Failed Distributions ##
# Number of retries allowed for the distribution list if it fails audit.
# If all retries fail, the task will not distribute anything for the round.
# This is also the number of rounds of submissions it will keep.
allowed_failed_distributions: 8

## Space ##
# Expected Task Data Size in MBs for the account size.
# Minimums: 2 for whitelisted tasks, 1 for production, 0.1 for testing.
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#space for calculation details.
space: 0.1

## Requirement Tags (Optional) ##
# To add more global variables and task variables, please refer to the type, value, description format shown below.
# The ORCA_TASK addon is REQUIRED
requirementsTags:
  - type: ADDON
    value: "ORCA_TASK"
  - type: CPU
    value: "4-core"
  - type: RAM
    value: "5 GB"
  - type: STORAGE
    value: "5 GB"
  - type: TASK_VARIABLE
    value: "ANTHROPIC_API_KEY"
    description: "Your Anthropic API key. You can get one here: https://console.anthropic.com/settings/keys"
  - type: TASK_VARIABLE
    value: "GITHUB_USERNAME"
    description: "Your GitHub username. You can sign up for an account here: https://github.com/join"
  - type: TASK_VARIABLE
    value: "GITHUB_TOKEN"
    description: "Your GitHub Personal Access Token. You can create one here: https://github.com/settings/tokens"

## Tags ##
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#tags for available tag options.
tags: ["AI"]

## Environment ##
# TEST or PRODUCTION
# Production mode will expose your task to all the task runners, even if not whitelisted.
environment: "TEST"

#################################### FOR UPDATING TASKS ONLY ####################################

## Old Task ID ##
task_id: "48h3f4r3AR7MdgCMkET4v3yh7PpPHuqGDWzqgH52rny1"

## Migration Description ##
migrationDescription: "Fix audit bug"
130 worker/config-task.yml Normal file
@@ -0,0 +1,130 @@
######################## ALL FIELDS ARE REQUIRED UNLESS OTHERWISE NOTED #########################

######################################### TASK METADATA #########################################
############################ Will be displayed in the desktop node ##############################

## Task Name ##
# Maximum 24 characters.
task_name: "Prometheus Docs Agent"

## Task Author ##
author: "Prometheus"

## Task Description Markdown ##
# If you specify a markdown file, the description field will be ignored.
# Markdown is recommended for better formatting.
markdownDescriptionPath: "./README.md"

## Task Description ##
# Ignored if you specify a markdown file.
description: "Task description."

## Repository URL ##
# Must be public for whitelisted tasks.
repositoryUrl: "https://github.com/koii-network/builder-247"

## Image URL ##
# 230x86 pixels.
imageUrl: "https://koii-k2-task-metadata.s3.us-east-2.amazonaws.com/Docs.png"

## Info URL ##
infoUrl: "https://www.koii.network/blog/Earn-Crypto-With-AI-Agent"

####################################### TASK CONFIGURATION ######################################

## Task Executable Network ##
# IPFS or DEVELOPMENT
# Keep this as IPFS unless you know you need to change it.
task_executable_network: "IPFS"

## Task Audit Program ##
# Task Executable Network IPFS: Path to your executable.
# Task Executable Network DEVELOPMENT: The value should be 'main'.
# Keep this as-is unless you know you need to change it.
task_audit_program: "dist/main.js"

## Round Time ##
# Duration of task, measured in slots (with each slot approximately equal to 408ms). Should be at least 800 slots.
# See https://www.koii.network/docs/concepts/what-are-tasks/what-are-tasks/gradual-consensus for more information on how round time, audit window, and submission window work.
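# With ~408 ms per slot, 5000 slots ≈ 2040 s, so each round lasts roughly 34 minutes.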
round_time: 5000

## Audit Window ##
# The audit window should be at least 1/3 of the round time.
audit_window: 2200

## Submission Window ##
# The submission window should be at least 1/3 of the round time.
submission_window: 2200

## Minimum Stake Amount ##
# The minimum amount of KOII or KPL that a user must stake in order to participate in the task.
minimum_stake_amount: 0.01

## Task Bounty Type ##
# KOII or KPL
task_type: "KOII"

## Token Mint Address (ONLY for KPL tasks) ##
# The Fire Token address is provided as an example.
token_type: "4qayyw53kWz6GzypcejjT1cvwMXS1qYLSMQRE8se3gTv"

## Total Bounty Amount ##
# The total bounty amount that will be available for distribution over all rounds.
# Does nothing when updating a task.
total_bounty_amount: 12000

## Bounty Amount per Round ##
# The maximum amount that can be distributed per round.
# If the actual distribution per round exceeds this amount, the distribution list will fail.
bounty_amount_per_round: 2000

## Allowed Failed Distributions ##
# Number of retries allowed for the distribution list if it fails audit.
# If all retries fail, the task will not distribute anything for the round.
# This is also the number of rounds of submissions it will keep.
allowed_failed_distributions: 8

## Space ##
# Expected Task Data Size in MBs for the account size.
# Minimums: 2 for whitelisted tasks, 1 for production, 0.1 for testing.
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#space for calculation details.
space: 5

## Requirement Tags (Optional) ##
# To add more global variables and task variables, please refer to the type, value, description format shown below.
# The ORCA_TASK addon is REQUIRED
requirementsTags:
  - type: ADDON
    value: "ORCA_TASK"
  - type: CPU
    value: "4-core"
  - type: RAM
    value: "5 GB"
  - type: STORAGE
    value: "5 GB"
  - type: TASK_VARIABLE
    value: "ANTHROPIC_API_KEY"
    description: "Your Anthropic API key. You can get one here: https://console.anthropic.com/settings/keys"
  - type: TASK_VARIABLE
    value: "GITHUB_USERNAME"
    description: "Your GitHub username. You can sign up for an account here: https://github.com/join"
  - type: TASK_VARIABLE
    value: "GITHUB_TOKEN"
    description: "Your GitHub Personal Access Token. You can create one here: https://github.com/settings/tokens"

## Tags ##
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#tags for available tag options.
tags: ["AI"]

## Environment ##
# TEST or PRODUCTION
# Production mode will expose your task to all the task runners, even if not whitelisted.
environment: "TEST"

#################################### FOR UPDATING TASKS ONLY ####################################

## Old Task ID ##
task_id: "62n2aAVVV42rtt53wxieotTdnKpTRjiChsHYdSxHDhAZ"

## Migration Description ##
migrationDescription: "Error logging and Bug Fixing and Slack Notification"
7 worker/jest.config.js Normal file
@@ -0,0 +1,7 @@
export default {
  transform: { "^.+\\.tsx?$": "babel-jest" },
  transformIgnorePatterns: ["/node_modules/(?!@babel/runtime)"],
  moduleFileExtensions: ["ts", "tsx", "js", "jsx", "json", "node"],
  testEnvironment: "node",
};

worker/orca-agent/.dockerignore
Normal file
16
worker/orca-agent/.dockerignore
Normal file
@ -0,0 +1,16 @@
|
||||
**/.env
|
||||
**/.env.*
|
||||
|
||||
**/node_modules
|
||||
**/dist
|
||||
**/build
|
||||
**/*.log
|
||||
**/Dockerfile
|
||||
**/docker-compose.yml
|
||||
**/venv
|
||||
**/.venv
|
||||
**/*__pycache__
|
||||
**/.pytest_cache
|
||||
**/*.db
|
||||
**/*.egg-info
|
||||
**/*/repos/
|
26 worker/orca-agent/.env.example Normal file
@@ -0,0 +1,26 @@
ANTHROPIC_API_KEY=your_anthropic_api_key
# the token requires the repo scope
GITHUB_TOKEN=your_github_token
GITHUB_USERNAME=your_github_username

# for testing only
# these credentials must be different from the ones above
# they are used to create and delete test repositories
# the token requires the repo and delete_repo scopes
UPSTREAM_GITHUB_TOKEN=your_upstream_github_token
UPSTREAM_GITHUB_USERNAME=your_upstream_github_username

# for testing only
MIDDLE_SERVER_URL=http://localhost:3000

TASK_SYSTEM_PROMPT="You are an AI development assistant specializing in writing code and creating GitHub pull requests.
Follow these rules:
1. Create a new file in the /src directory.
2. Write a single Python function that accomplishes the assigned task.
3. Commit and push the changes to the remote repository.
4. Create a second new file in the /tests directory.
5. Write a series of tests that thoroughly test the function, including edge cases and error handling, using PyTest.
6. Commit and push the changes to the remote repository.
7. Run the tests to ensure they pass.
8. Continue to make commits and push them to the remote repository until the tests pass.
9. Validate code changes before submitting"
1 worker/orca-agent/.gitignore vendored Normal file
@@ -0,0 +1 @@
venv
48 worker/orca-agent/Dockerfile Normal file
@@ -0,0 +1,48 @@
# Use the official Python image from the Docker Hub
FROM python:3.12-slim

# Set the working directory in the container
WORKDIR /app

# Copy the requirements.txt file into the container
COPY requirements.txt .


# Install Git and any other necessary packages
RUN apt-get update && apt-get install -y git sudo curl

# Install the dependencies
RUN pip install -r requirements.txt

# Configure Git to add the safe directory
RUN git config --global --add safe.directory /app

# Copy the rest of your application code into the container
COPY . .

ENV MIDDLE_SERVER_URL=https://builder247.api.koii.network

# Configure logging and output
ENV PYTHONUNBUFFERED=1
ENV TERM=xterm-256color
ENV FORCE_COLOR=1

# Location of the task database inside the container
ENV DATABASE_PATH=/data/database.db

# Make port 8080 available to the world outside this container
EXPOSE 8080

# Set the command to run your application
CMD ["gunicorn", \
    "--log-level=error", \
    "--error-logfile=-", \
    "--capture-output", \
    "--enable-stdio-inheritance", \
    "--logger-class=gunicorn.glogging.Logger", \
    "--timeout", "600", \
    "--graceful-timeout", "600", \
    "--keep-alive", "5", \
    "-w", "1", \
    "-b", "0.0.0.0:8080", \
    "main:app"]
0 worker/orca-agent/README.md Normal file
6 worker/orca-agent/main.py Normal file
@@ -0,0 +1,6 @@
from src.server import create_app

app = create_app()

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=8080, debug=True)
18 worker/orca-agent/requirements.txt Normal file
@@ -0,0 +1,18 @@
anthropic>=0.8.1
python-dotenv>=1.0.0
pandas>=2.0.0
tiktoken>=0.5.2
pytest>=8.0.2
typing-extensions>=4.12.2
GitPython>=3.1.44
pygithub>=2.5.0
Flask>=3.0.0
requests>=2.32.0
cryptography>=42.0.0
gunicorn>=22.0.0
solders>=0.26.0
base58>=2.1.0
tenacity>=9.0.0
sqlmodel>=0.0.22
openai>=0.28.0
colorama>=0.4.6
118 worker/orca-agent/setup.md Normal file
@@ -0,0 +1,118 @@
# 247 Builder

## Developing locally

Navigate to the correct directory:

```sh
cd builder/container
```

Set up a virtual environment and activate it:

```sh
python3 -m venv .venv
source .venv/bin/activate
```

Install dependencies:

```sh
pip install -r requirements.txt
```

Run tests:

```sh
python3 -m pytest tests/
```

Run the agent:

```sh
python3 main.py
```

## Developing in Docker

### Running the Flask Server

Navigate to the correct directory:

```sh
cd builder/container
```

Build the image:

```sh
docker build -t builder247 .
```

Run the container:

```sh
docker run builder247
```
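Note: the Dockerfile exposes port 8080, so if you want to reach the Flask server from your host you will likely need to publish the port, e.g. `docker run -p 8080:8080 builder247`.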
You can also run with a mounted volume if you'd like to change files without updating the container:

```sh
docker run -v $(pwd):/app builder247
```

### Running Interactively (using the shell)

Navigate to the correct directory:

```sh
cd builder/container
```

Change this line in the Dockerfile:

```sh
CMD ["python", "main.py"]
```

to

```sh
CMD ["/bin/bash"]
```

Build the image:

```sh
docker build -t builder247 .
```

Run the container with a mounted volume:

```sh
docker run -it -v $(pwd)/builder:/app builder247
```

This will give you access to your files within the container and run the container in interactive mode with shell access. You can then run tests inside the container using:

```sh
python -m pytest tests/
```

or

```sh
python3 -m pytest tests/
```

You can also run the Flask server in the container with:

```sh
python main.py
```

To exit the container's shell:

```sh
exit
```
8 worker/orca-agent/setup.py Normal file
@@ -0,0 +1,8 @@
from setuptools import setup, find_packages

setup(
    name="task-flow",
    version="0.1",
    packages=find_packages(include=["src", "src.*"]),
    python_requires=">=3.6",
)
22 worker/orca-agent/src/dababase/models.py Normal file
@@ -0,0 +1,22 @@
"""Database models."""

from datetime import datetime
from typing import Optional, List
from sqlmodel import SQLModel, Field, Relationship
from sqlalchemy import JSON
from sqlalchemy import Column
from prometheus_swarm.database.models import Conversation, Message, Log


class Submission(SQLModel, table=True):
    """Task submission model."""

    task_id: str
    round_number: int = Field(primary_key=True)
    status: str = "pending"
    pr_url: Optional[str] = None
    username: Optional[str] = None
    repo_urls: Optional[dict] = Field(
        default=None, sa_column=Column(JSON)
    )  # Store as JSON type
    repo_url: Optional[str] = None
71 worker/orca-agent/src/server/__init__.py Normal file
@@ -0,0 +1,71 @@
"""Flask application initialization."""

from flask import Flask, request
from .routes import repo_summary, star, audit, healthz, submission
from prometheus_swarm.utils.logging import configure_logging, log_section, log_key_value, log_value
from prometheus_swarm.database import initialize_database
from colorama import Fore, Style
import time
import uuid
import os


def create_app():
    """Create and configure the Flask application."""
    app = Flask(__name__)

    # Add request ID middleware
    @app.before_request
    def before_request():
        request.id = str(uuid.uuid4())
        # Store request start time for duration calculation
        request.start_time = time.time()

    @app.after_request
    def after_request(response):
        # Calculate request duration in milliseconds
        duration = (time.time() - request.start_time) * 1000

        # Get error message if this is an error response
        error_msg = ""
        if response.status_code >= 400:
            try:
                json_data = response.get_json()
                if isinstance(json_data, dict):
                    error_msg = json_data.get("error") or json_data.get("message", "")
            except Exception:
                # If we can't get JSON data, try to get the message from the response
                error_msg = getattr(response, "description", "")

        # Log the request with appropriate color
        color = Fore.GREEN if response.status_code < 400 else Fore.RED
        log_value(
            f"[{color}REQ{Style.RESET_ALL}] {request.method} {request.path} "
            f"{color}{response.status_code}{Style.RESET_ALL} {error_msg} {duration:.0f}ms"
        )

        return response

    # Register blueprints
    app.register_blueprint(healthz.bp)
    app.register_blueprint(repo_summary.bp)
    app.register_blueprint(star.bp)
    app.register_blueprint(audit.bp)
    app.register_blueprint(submission.bp)

    # Configure logging within app context
    with app.app_context():
        # Set up logging (includes both console and database logging)
        configure_logging()
        # Initialize database
        initialize_database()
        # Disable Flask's default logging
        app.logger.disabled = True

    # Log startup information
    log_section("SERVER STARTUP")
    log_key_value("Workers", 1)
    log_key_value("Host", "0.0.0.0:8080")
    log_key_value("Database", os.getenv("DATABASE_PATH", "Not configured"))

    return app
65 worker/orca-agent/src/server/models/Log.py Normal file
@@ -0,0 +1,65 @@
"""Database model for logging."""

from datetime import datetime
from prometheus_swarm.database import get_db


def init_logs_table():
    """Initialize the logs table if it doesn't exist."""
    # Not needed - handled by SQLModel
    pass


def save_log(
    level: str,
    message: str,
    module: str = None,
    function: str = None,
    path: str = None,
    line_no: int = None,
    exception: str = None,
    stack_trace: str = None,
    request_id: str = None,
    additional_data: str = None,
) -> bool:
    """
    Save a log entry to the database.

    Args:
        level: Log level (ERROR, WARNING, INFO, etc)
        message: Log message
        module: Module name where log was generated
        function: Function name where log was generated
        path: File path where log was generated
        line_no: Line number where log was generated
        exception: Exception type if any
        stack_trace: Stack trace if any
        request_id: Request ID if available
        additional_data: Any additional JSON-serializable data

    Returns:
        bool: True if log was saved successfully
    """
    try:
        db = get_db()
        from prometheus_swarm.database import Log

        log = Log(
            timestamp=datetime.utcnow(),
            level=level,
            message=message,
            module=module,
            function=function,
            path=path,
            line_no=line_no,
            exception=exception,
            stack_trace=stack_trace,
            request_id=request_id,
            additional_data=additional_data,
        )
        db.add(log)
        db.commit()
        return True
    except Exception as e:
        print(f"Failed to save log to database: {e}")  # Fallback logging
        return False
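For reference, a minimal sketch of calling `save_log` (all field values below are illustrative placeholders, not taken from the repository):

```python
from src.server.models.Log import save_log

# Hypothetical call; values are placeholders
save_log(
    level="ERROR",
    message="PR audit failed",
    module="audit_service",
    function="audit_repo",
    request_id="3f1c9a2e",  # made-up request ID
)
```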
62 worker/orca-agent/src/server/routes/audit.py Normal file
@@ -0,0 +1,62 @@
from flask import Blueprint, jsonify, request
from src.server.services.github_service import verify_pr_ownership
from src.server.services.audit_service import audit_repo
import logging

logger = logging.getLogger(__name__)

bp = Blueprint("audit", __name__)


@bp.post("/audit/<round_number>")
def audit_submission(round_number: int):
    logger.info("Auditing submission")

    data = request.get_json()
    submission = data.get("submission")

    if not submission:
        return jsonify({"error": "Missing submission"}), 400

    # submission_round_number = submission.get("roundNumber")
    task_id = submission.get("taskId")
    pr_url = submission.get("prUrl")
    github_username = submission.get("githubUsername")

    # Extract repo owner and name from PR URL
    try:
        pr_url_parts = pr_url.split('github.com/')[1].split('/')
        repo_owner = pr_url_parts[0]
        repo_name = pr_url_parts[1]
    except (IndexError, AttributeError):
        return jsonify({"error": "Invalid PR URL format"}), 400
    print(f"Repo owner: {repo_owner}, Repo name: {repo_name}")
    # The round-number check is skipped because the audit logic runs during distribution, so the round numbers may differ.
    # if int(round_number) != submission_round_number:
    #     return jsonify({"error": "Round number mismatch"}), 400

    if (
        not task_id
        or not pr_url
        or not github_username
        or not repo_owner
        or not repo_name
    ):
        return jsonify({"error": "Missing submission data"}), 400

    is_valid = verify_pr_ownership(
        pr_url=pr_url,
        expected_username=github_username,
        expected_owner=repo_owner,
        expected_repo=repo_name,
    )

    if not is_valid:
        return jsonify(False)

    try:
        is_approved = audit_repo(pr_url)
        return jsonify(is_approved), 200
    except Exception as e:
        logger.error(f"Error auditing PR: {str(e)}")
        return jsonify(True), 200
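For reference, a request to this audit endpoint might look like the following sketch (the task ID, PR URL, and username are placeholders, and the local URL assumes the default port from the Dockerfile):

```python
import requests

# Hypothetical payload mirroring the fields read by audit_submission
resp = requests.post(
    "http://localhost:8080/audit/1",
    json={
        "submission": {
            "taskId": "<task-id>",
            "prUrl": "https://github.com/owner/repo/pull/1",
            "githubUsername": "octocat",
        }
    },
)
print(resp.json())  # False if ownership fails, otherwise the audit recommendation
```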
14 worker/orca-agent/src/server/routes/healthz.py Normal file
@@ -0,0 +1,14 @@
from flask import Blueprint, jsonify
from prometheus_swarm.database import get_db
import logging

logger = logging.getLogger(__name__)

bp = Blueprint("healthz", __name__)


@bp.post("/healthz")
def healthz():
    # Test database connection
    _ = get_db()
    return jsonify({"status": "ok"})
54 worker/orca-agent/src/server/routes/repo_summary.py Normal file
@@ -0,0 +1,54 @@
from flask import Blueprint, jsonify, request
from src.server.services import repo_summary_service

bp = Blueprint("repo_summary", __name__)


@bp.post("/repo_summary/<round_number>")
def start_task(round_number):
    logger = repo_summary_service.logger
    logger.info(f"Task started for round: {round_number}")

    data = request.get_json()
    logger.info(f"Task data: {data}")
    required_fields = [
        "taskId",
        "round_number",
        "repo_url"
    ]
    if any(data.get(field) is None for field in required_fields):
        return jsonify({"error": "Missing data"}), 401

    result = repo_summary_service.handle_task_creation(
        task_id=data["taskId"],
        round_number=int(round_number),
        repo_url=data["repo_url"],
    )

    return result


if __name__ == "__main__":
    from flask import Flask

    # Create a Flask app instance
    app = Flask(__name__)
    app.register_blueprint(bp)

    # Test data
    test_data = {
        "taskId": "fake",
        "round_number": "1",
        "repo_url": "https://github.com/koii-network/docs"
    }

    # Set up test context
    with app.test_client() as client:
        # Make a POST request to the endpoint
        response = client.post(
            "/repo_summary/1",
            json=test_data
        )

        # Print the response
        print(f"Status Code: {response.status_code}")
        print(f"Response: {response.get_json()}")
39 worker/orca-agent/src/server/routes/star.py Normal file
@@ -0,0 +1,39 @@
from prometheus_swarm.utils.logging import log_key_value
from flask import Blueprint, jsonify, request
from src.server.services import star_service

bp = Blueprint("star", __name__)


@bp.post("/star/<round_number>")
def start_task(round_number):
    logger = star_service.logger
    logger.info(f"Task started for round: {round_number}")

    data = request.get_json()
    logger.info(f"Task data: {data}")
    required_fields = [
        "taskId",
        "round_number",
        "github_urls",
    ]
    if any(data.get(field) is None for field in required_fields):
        return jsonify({"error": "Missing data"}), 401

    try:
        # Log incoming data
        print("Received data:", data)
        print("Round number:", round_number)

        result = star_service.handle_star_task(
            task_id=data["taskId"],
            round_number=int(round_number),
            github_urls=data["github_urls"],
        )
        return result
    except Exception as e:
        print(f"Error in star endpoint: {str(e)}")
        print(f"Error type: {type(e)}")
        import traceback
        print(f"Traceback: {traceback.format_exc()}")
        return jsonify({'error': str(e)}), 500
37 worker/orca-agent/src/server/routes/submission.py Normal file
@@ -0,0 +1,37 @@
from flask import Blueprint, jsonify
from prometheus_swarm.database import get_db
from src.dababase.models import Submission
import logging
import os

logger = logging.getLogger(__name__)

bp = Blueprint("submission", __name__)


@bp.get("/submission/<roundNumber>")
def fetch_submission(roundNumber):
    logger.info(f"Fetching submission for round: {roundNumber}")
    db = get_db()
    submission = (
        db.query(Submission)
        .filter(
            Submission.round_number == int(roundNumber),
        )
        .first()
    )
    logger.info(f"Submission: {submission}")
    if submission:

        github_username = os.getenv("GITHUB_USERNAME")
        return jsonify(
            {
                "taskId": submission.task_id,
                "roundNumber": submission.round_number,
                "status": submission.status,
                "prUrl": submission.pr_url,
                "githubUsername": github_username,
            }
        )
    else:
        return jsonify({"error": "Submission not found"}), 409
47 worker/orca-agent/src/server/services/audit_service.py Normal file
@@ -0,0 +1,47 @@
"""Audit service module."""

import logging
from prometheus_swarm.clients import setup_client
from src.workflows.repoSummarizerAudit.workflow import repoSummarizerAuditWorkflow
from src.workflows.repoSummarizerAudit.prompts import (
    PROMPTS as REPO_SUMMARIZER_AUDIT_PROMPTS,
)

logger = logging.getLogger(__name__)


def audit_repo(pr_url):
    # def review_pr(repo_urls, pr_url, github_username, star_only=True):
    """Review PR and decide if it should be accepted, revised, or rejected."""
    try:
        # Set up client and workflow
        client = setup_client("anthropic")

        # Below commented out because we won't need to distribute starring repo nodes
        # star_repo_workflow = StarRepoAuditWorkflow(
        #     client=client,
        #     prompts=STAR_REPO_AUDIT_PROMPTS,
        #     repo_url=repo_urls[0],
        #     github_username=github_username,
        # )
        # star_repo_workflow.run()

        repo_summarizer_audit_workflow = repoSummarizerAuditWorkflow(
            client=client,
            prompts=REPO_SUMMARIZER_AUDIT_PROMPTS,
            pr_url=pr_url,
        )

        # Run workflow and get result
        result = repo_summarizer_audit_workflow.run()
        recommendation = result["data"]["recommendation"]
        return recommendation
    except Exception as e:
        logger.error(f"PR review failed: {str(e)}")
        raise Exception("PR review failed")


if __name__ == "__main__":
    # review_pr(["https://github.com/alexander-morris/koii-dumper-reveal"], "https://github.com/koii-network/namespace-wrapper/pull/1", "HermanL02")

    audit_repo("https://github.com/koii-network/namespace-wrapper/pull/1")
44 worker/orca-agent/src/server/services/github_service.py Normal file
@@ -0,0 +1,44 @@
import re
from github import Github
import os
import logging

logger = logging.getLogger(__name__)


def verify_pr_ownership(
    pr_url,
    expected_username,
    expected_owner,
    expected_repo,
):
    try:
        gh = Github(os.environ.get("GITHUB_TOKEN"))

        match = re.match(r"https://github.com/([^/]+)/([^/]+)/pull/(\d+)", pr_url)
        if not match:
            logger.error(f"Invalid PR URL: {pr_url}")
            return False

        owner, repo_name, pr_number = match.groups()

        if owner != expected_owner or repo_name != expected_repo:
            logger.error(
                f"PR URL mismatch: {pr_url} != {expected_owner}/{expected_repo}"
            )
            return False

        repo = gh.get_repo(f"{owner}/{repo_name}")
        pr = repo.get_pull(int(pr_number))

        if pr.user.login != expected_username:
            logger.error(
                f"PR username mismatch: {pr.user.login} != {expected_username}"
            )
            return False
        return True

    except Exception as e:
        logger.error(f"Error verifying PR ownership: {str(e)}")
        # Note: unexpected errors fail open and count the PR as valid
        return True
60 worker/orca-agent/src/server/services/repo_summary_service.py Normal file
@@ -0,0 +1,60 @@
"""Task service module."""

import requests
import os
from flask import jsonify
from prometheus_swarm.database import get_db
from prometheus_swarm.clients import setup_client
from src.workflows.repoSummarizer.workflow import RepoSummarizerWorkflow
from prometheus_swarm.utils.logging import logger, log_error
from dotenv import load_dotenv
from src.workflows.repoSummarizer.prompts import PROMPTS
from src.dababase.models import Submission

load_dotenv()


def handle_task_creation(task_id, round_number, repo_url):
    """Handle task creation request."""
    try:
        db = get_db()
        client = setup_client("anthropic")

        workflow = RepoSummarizerWorkflow(
            client=client,
            prompts=PROMPTS,
            repo_url=repo_url,
        )

        result = workflow.run()
        if result.get("success"):
            submission = Submission(
                task_id=task_id,
                round_number=round_number,
                status="summarized",
                repo_url=repo_url,
                pr_url=result["data"]["pr_url"],
            )
            db.add(submission)
            db.commit()
            return jsonify({"success": True, "result": result})
        else:
            return jsonify(
                {"success": False, "result": result.get("error", "No result")}
            )
    except Exception as e:
        logger.error(f"Repo summarizer failed: {str(e)}")
        raise


if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    with app.app_context():
        result = handle_task_creation(
            task_id="1",
            round_number=6,
            repo_url="https://github.com/koii-network/builder-test",
        )
        print(result)
50 worker/orca-agent/src/server/services/star_service.py Normal file
@@ -0,0 +1,50 @@
"""Task service module."""

import requests
import os
from flask import jsonify
from prometheus_swarm.database import get_db
from prometheus_swarm.clients import setup_client
from src.workflows.repoSummarizer.workflow import RepoSummarizerWorkflow
from prometheus_swarm.utils.logging import logger, log_error
from src.workflows.starRepo.workflow import StarRepoWorkflow
from dotenv import load_dotenv
from src.workflows.repoSummarizer.prompts import PROMPTS

load_dotenv()


def handle_star_task(task_id, round_number, github_urls):
    """Handle task creation request."""
    try:
        db = get_db()
        client = setup_client("anthropic")
        for url in github_urls:
            star_workflow = StarRepoWorkflow(
                client=client,
                prompts=PROMPTS,
                repo_url=url,
            )
            star_result = star_workflow.run()
            if not star_result or not star_result.get("success"):
                log_error(
                    Exception(star_result.get("error", "No result")),
                    "Repository star failed",
                )
        return jsonify({"success": True, "result": "Repository starred"})
    except Exception as e:
        logger.error(f"Repo summarizer failed: {str(e)}")
        raise


if __name__ == "__main__":
    from flask import Flask

    app = Flask(__name__)
    with app.app_context():
        result = handle_star_task(
            task_id="1",
            round_number=6,
            github_urls=["https://github.com/koii-network/builder-test"],
        )
        print(result)
93 worker/orca-agent/src/types.py Normal file
@@ -0,0 +1,93 @@
from typing import Dict, Any, Optional, List, TypedDict, Union, Literal, Callable


class ToolDefinition(TypedDict):
    """Standard internal tool definition format."""

    name: str
    description: str
    parameters: Dict[str, str]  # JSON Schema object
    required: List[str]
    final_tool: bool
    function: Callable


class ToolCall(TypedDict):
    """Format for a tool call made by the LLM."""

    id: str  # Unique identifier for this tool call
    name: str  # name of tool being called
    arguments: Dict[str, Any]


class ToolOutput(TypedDict):
    """Standard output format for all tools.

    All tools must return a response in this format.
    The message field contains a human-readable description of what happened,
    which will be an error message if success is False.
    """

    success: bool  # Whether the tool execution was successful
    message: str  # Human-readable message about what happened (error message if success is False)
    data: Optional[Dict[str, Any]]  # Optional structured data from the tool


class ToolResponse(TypedDict):
    """Format for a tool execution response.

    Wraps a tool's output with its call ID for client handling.
    """

    tool_call_id: str  # ID of the tool call this is responding to
    output: ToolOutput  # The actual output from the tool


class PhaseResult(TypedDict):
    """Format for a phase result."""

    success: bool
    data: Dict[str, Any]
    error: Optional[str]


class ToolChoice(TypedDict):
    """Configuration for tool usage."""

    type: Literal["optional", "required", "required_any"]
    tool: Optional[str]  # Required only when type is "required"


class ToolConfig(TypedDict):
    """Configuration for tool usage."""

    tool_definitions: List[ToolDefinition]
    tool_choice: ToolChoice


class TextContent(TypedDict):
    """Format for plain text content."""

    type: Literal["text"]
    text: str


class ToolCallContent(TypedDict):
    """Format for tool call content."""

    type: Literal["tool_call"]
    tool_call: ToolCall


class ToolResponseContent(TypedDict):
    """Format for tool response content."""

    type: Literal["tool_response"]
    tool_response: ToolResponse


class MessageContent(TypedDict):
    """Standard internal message format."""

    role: Literal["user", "assistant", "system", "tool"]
    content: Union[str, List[Union[TextContent, ToolCall, ToolResponseContent]]]
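As an illustration of how these formats nest, a tool's output is wrapped together with the ID of the call that produced it (the values below are made up):

```python
from src.types import ToolOutput, ToolResponse

# Hypothetical example: a successful tool result and its wrapping response
output: ToolOutput = {
    "success": True,
    "message": "File written successfully",
    "data": {"path": "src/hello.py"},
}
response: ToolResponse = {
    "tool_call_id": "call_123",  # made-up tool call ID
    "output": output,
}
```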
52 worker/orca-agent/src/workflows/repoSummarizer/__main__.py Normal file
@@ -0,0 +1,52 @@
"""Entry point for the repo summarizer workflow."""

import sys
import argparse
from dotenv import load_dotenv
from src.workflows.repoSummarizer.workflow import RepoSummarizerWorkflow
from src.workflows.repoSummarizer.prompts import PROMPTS
from prometheus_swarm.clients import setup_client

# Load environment variables
load_dotenv()


def main():
    """Run the repo summarizer workflow."""
    parser = argparse.ArgumentParser(
        description="Generate summary documentation for a GitHub repository"
    )
    parser.add_argument(
        "--repo",
        type=str,
        required=True,
        help="GitHub repository URL (e.g., https://github.com/owner/repo)",
    )

    parser.add_argument(
        "--model",
        type=str,
        default="anthropic",
        choices=["anthropic", "openai", "xai"],
        help="Model provider to use (default: anthropic)",
    )
    args = parser.parse_args()

    # Initialize client
    client = setup_client(args.model)

    # Run the repo summarizer workflow
    workflow = RepoSummarizerWorkflow(
        client=client,
        prompts=PROMPTS,
        repo_url=args.repo,
    )

    result = workflow.run()
    if not result or not result.get("success"):
        print("Repo summarizer workflow failed")
        sys.exit(1)


if __name__ == "__main__":
    main()
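Since this module is the package's entry point, it can be run directly, e.g. `python -m src.workflows.repoSummarizer --repo https://github.com/owner/repo --model anthropic` (the repository URL here is a placeholder).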
64 worker/orca-agent/src/workflows/repoSummarizer/phases.py Normal file
@@ -0,0 +1,64 @@
"""Task decomposition workflow phases implementation."""

from prometheus_swarm.workflows.base import WorkflowPhase, Workflow


class BranchCreationPhase(WorkflowPhase):
    def __init__(self, workflow: Workflow, conversation_id: str = None):
        super().__init__(
            workflow=workflow,
            prompt_name="create_branch",
            available_tools=["create_branch"],
            conversation_id=conversation_id,
            name="Branch Creation",
        )


class RepoClassificationPhase(WorkflowPhase):
    def __init__(self, workflow: Workflow, conversation_id: str = None):
        super().__init__(
            workflow=workflow,
            prompt_name="classify_repository",
            available_tools=["read_file", "list_files", "classify_repository"],
            conversation_id=conversation_id,
            name="Repository Classification",
        )


class ReadmeGenerationPhase(WorkflowPhase):
    def __init__(
        self, workflow: Workflow, conversation_id: str = None, prompt_name: str = None
    ):
        super().__init__(
            workflow=workflow,
            prompt_name=prompt_name,
            available_tools=[
                "read_file",
                "list_files",
                "write_file",
            ],
            conversation_id=conversation_id,
            name="Readme Generation",
        )


class ReadmeReviewPhase(WorkflowPhase):
    def __init__(self, workflow: Workflow, conversation_id: str = None):
        super().__init__(
            workflow=workflow,
            prompt_name="review_readme_file",
            available_tools=["read_file", "list_files", "review_readme_file"],
            conversation_id=conversation_id,
            name="Readme Review",
        )


class CreatePullRequestPhase(WorkflowPhase):
    def __init__(self, workflow: Workflow, conversation_id: str = None):
        super().__init__(
            workflow=workflow,
            prompt_name="create_pr",
            available_tools=["read_file", "list_files", "create_pull_request_legacy"],
            conversation_id=conversation_id,
            name="Create Pull Request",
        )
593 worker/orca-agent/src/workflows/repoSummarizer/prompts.py Normal file
@@ -0,0 +1,593 @@
"""Prompts for the repository summarization workflow."""

PROMPTS = {
    "system_prompt": (
        "You are an expert software architect and technical lead specializing in summarizing "
        "repositories into comprehensive documentation. You excel at analyzing codebases "
        "and creating clear, structured documentation."
    ),
    "create_branch": (
        "You need to create a feature branch for the README generation.\n"
        "Create a new branch with a descriptive name related to creating a README file.\n"
    ),
    "classify_repository": (
        "Analyze the repository structure and identify the type of repository this is.\n"
        "Use the `classify_repository` tool to report your choice.\n"
        "You must choose one of the following repository types:\n"
        "- Library/SDK: Code meant to be imported and used by other developers\n"
        "- Web App: Frontend or full-stack web application\n"
        "- API Service: Server-side application providing APIs\n"
        "- Mobile App: Native or cross-platform mobile app\n"
        "- Tutorial: Educational repository demonstrating techniques\n"
        "- Template: Starter code for new projects\n"
        "- CLI Tool: Command-line interface application\n"
        "- Framework: Foundational structure for building applications\n"
        "- Data Science: Machine learning or data analysis project\n"
        "- Plugin: Extension or module for a larger system (e.g., CMS, IDE, platform)\n"
        "- Chrome Extension: Browser extension targeting the Chrome platform\n"
        "- Jupyter Notebook: Interactive code notebooks, often for demos or research\n"
        "- Infrastructure: Configuration or automation code (e.g., Docker, Terraform)\n"
        "- Smart Contract: Blockchain smart contracts, typically written in Solidity, Rust, etc.\n"
        "- DApp: Decentralized application with both smart contract and frontend components\n"
        "- Game: Codebase for a game or game engine (2D, 3D, or browser-based)\n"
        "- Desktop App: GUI application for desktop environments (e.g., Electron, Qt, Tauri)\n"
        "- Dataset: Repository containing structured data for analysis or training\n"
        "- Other: If it doesn't fit into any of the above categories\n"
    ),
    "create_pr": (
        "You are creating a pull request for the documentation you have generated:\n"
        "IMPORTANT: Always use relative paths (e.g., 'src/file.py' not '/src/file.py')\n\n"
        "Steps to create the pull request:\n"
        "1. First examine the available files to understand the implementation\n"
        "2. Create a clear and descriptive PR title\n"
        "3. Write a comprehensive PR description that includes:\n"
        "   - Description of all changes made\n"
        "   - The main features and value of the documentation\n"
    ),
    "library": (
        "Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a software library intended"
        " for use by developers.\n\n"
        "Your README should be formatted in Markdown and include clearly defined section headers.\n\n"
        "Please include the following sections:\n"
        "1. **Project Overview**\n"
        "   - A concise description of what the library does\n"
        "   - Its main purpose and the problems it solves\n"
        "   - Key features and benefits\n\n"
        "2. **Installation**\n"
        "   - Instructions for installing the library using relevant package managers (e.g., npm, pip, etc.)\n"
        "   - Mention any prerequisites if applicable\n\n"
        "3. **API Reference**\n"
        "   - Generate a complete list of all publicly exported functions, classes, and constants from the library\n"
        "   - For each item, include:\n"
        "     - Its name\n"
        "     - Description of what it does\n"
        "     - Function signature with types and descriptions of parameters and return values\n"
        "     - Example usage\n"
        "   - Do not omit any significant exports — include everything that would be relevant to a developer using "
        "this library\n"
        "   - Group related items (e.g., utility functions, configuration, components) under subsections if helpful\n"
        "4. **Repository Structure**\n"
        "   - Briefly explain the purpose of key directories and files\n\n"
        "5. **Contributing**\n"
        "   - Include basic instructions for how others can contribute\n"
        "   - Mention where to find or how to run tests (if available)\n\n"
        "6. **License**\n"
        "   - State the type of license and include a link to the license file\n\n"
        "Additional notes:\n"
        "- Use bullet points and code blocks to improve readability\n"
        "- Keep language friendly but technical and precise\n"
        "- If configuration or extension points exist, explain them clearly\n\n"
    ),
    "web_app": (
        "Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a web application "
        "project.\n\n"
        "Format the output using Markdown with clear section headers and proper formatting.\n\n"
        "Include the following sections:\n"
        "1. **Project Overview**\n"
        "   - Describe the purpose and core functionality of the application\n"
        "   - Highlight key features and typical use cases\n\n"
        "2. **Getting Started**\n"
        "   - Provide setup instructions to run the app locally\n"
        "   - Include steps for installing dependencies and starting the development server\n"
        "   - Mention any required environment variables and how to configure them (e.g., `.env` file)\n\n"
        "3. **Deployment**\n"
        "   - Describe how to build and deploy the application to production\n"
        "   - Include relevant deployment commands and target platforms (e.g., Vercel, Netlify, Docker)\n\n"
        "4. **Project Structure**\n"
        "   - Briefly explain the purpose of major folders and files (e.g., `src/`, `public/`, `components/`)\n\n"
        "5. **Technologies Used**\n"
        "   - List the main frameworks, libraries, and tools (e.g., React, Vue, Vite, Tailwind)\n\n"
        "6. **Feature Highlights**\n"
        "   - Describe core user-facing features or flows (e.g., authentication, dashboards, routing)\n\n"
        "7. **Configuration**\n"
        "   - Mention any configurable options, build settings, or plugins used\n\n"
        "8. **License**\n"
        "   - State the license type and link to the license file\n\n"
        "Additional Notes:\n"
        "- Use bullet points, code blocks, and links where appropriate\n"
        "- Make sure commands are copy-pasteable\n"
        "- Keep language clear and helpful for developers new to the project"
    ),
    "api_service": (
        "Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a backend service that "
        "exposes an API (e.g., REST, GraphQL, or similar).\n\n"
        "Format the output using Markdown with clear section headers and developer-friendly formatting.\n\n"
        "Include the following sections:\n"
        "1. **Project Overview**\n"
        "   - Describe the purpose of the API and its core functionality\n"
        "   - Highlight key features and typical use cases\n\n"
        "2. **Getting Started**\n"
        "   - Provide setup instructions to run the service locally\n"
        "   - Include dependency installation and environment variable setup\n"
        "   - Describe how to start the server in development mode\n\n"
        "3. **API Documentation**\n"
        "   - List the available endpoints or routes\n"
        "   - For each endpoint, include:\n"
        "     - Method (GET, POST, etc.)\n"
        "     - Path and parameters\n"
        "     - Example request and response\n"
        "     - Authentication requirements (if any)\n"
        "   - If an OpenAPI/Swagger spec or GraphQL schema exists, link to it\n\n"
        "4. **Authentication**\n"
        "   - Describe how authentication works (e.g., API keys, OAuth, JWT)\n"
        "   - Include example headers or auth flow steps if needed\n\n"
        "5. **Project Structure**\n"
        "   - Explain key folders and files, such as `routes/`, `controllers/`, `models/`\n\n"
        "6. **Technologies Used**\n"
        "   - List major frameworks, libraries, or tools (e.g., Express, FastAPI, Prisma)\n\n"
        "7. **Deployment**\n"
        "   - Describe how to deploy the service (e.g., Docker, CI/CD, cloud platforms)\n"
        "   - Include environment config or scaling considerations if relevant\n\n"
        "8. **License**\n"
        "   - State the license type and link to the license file\n\n"
        "Additional Notes:\n"
        "- Use bullet points, code blocks, and sample payloads for clarity\n"
        "- Focus on making the API easy to understand and consume\n"
        "- Keep the tone clear and helpful for developers using the API"
    ),
    "mobile_app": (
        "Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a mobile application "
        "project.\n\n"
        "Format the output using Markdown with clear section headers and mobile developer–friendly formatting.\n\n"
        "Include the following sections:\n"
        "1. **Project Overview**\n"
        "   - Describe the purpose and core functionality of the app\n"
        "   - List key features and intended user experience\n\n"
        "2. **Supported Platforms**\n"
        "   - Indicate whether the app runs on Android, iOS, or both\n"
        "   - Mention any platform-specific dependencies or limitations\n\n"
        "3. **Getting Started**\n"
        "   - Provide setup instructions for running the app locally\n"
        "   - Include steps for installing dependencies and required SDKs (e.g., Android Studio, Xcode)\n"
        "   - Describe how to configure environment variables or API keys\n\n"
        "4. **Running the App**\n"
" - Show commands to run the app on a simulator/emulator or real device\n"
|
||||
" - Include platform-specific commands if needed (e.g., `npx react-native run-ios`, `flutter run`)\n\n"
|
||||
"5. **Project Structure**\n"
|
||||
" - Briefly explain the layout of important folders and files (e.g., `src/`, `ios/`, `android/`, `lib/`)\n\n"
|
||||
"6. **Technologies Used**\n"
|
||||
" - List the frameworks, SDKs, and libraries used (e.g., React Native, Flutter, Firebase)\n\n"
|
||||
"7. **Key Screens and Features**\n"
|
||||
" - Highlight core screens or flows within the app (e.g., login, profile, dashboard)\n"
|
||||
" - Optionally include screenshots or descriptions of user interactions\n\n"
|
||||
"8. **Build and Deployment**\n"
|
||||
" - Provide steps for creating production builds\n"
|
||||
" - Mention any tools or services used for distribution (e.g., TestFlight, Play Store, Expo)\n\n"
|
||||
"9. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Use bullet points, code blocks, and platform-specific sections where needed\n"
|
||||
"- Make sure setup steps work for both Android and iOS where applicable\n"
|
||||
"- Keep the tone clear and helpful for mobile developers"
|
||||
),
|
||||
"tutorial": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is designed as an educational "
|
||||
"tutorial or learning resource.\n\n"
|
||||
"Format the output using Markdown with clear section headers and a logical, beginner-friendly structure.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Summarize the goal of the tutorial and what users will learn\n"
|
||||
" - List key topics or technologies covered\n"
|
||||
" - Mention any prerequisites (e.g., knowledge of a language, tools to install)\n\n"
|
||||
"2. **Getting Started**\n"
|
||||
" - Provide step-by-step setup instructions\n"
|
||||
" - Include installation of dependencies, toolchain setup, and environment config\n"
|
||||
" - Ensure instructions work on major operating systems\n\n"
|
||||
"3. **Tutorial Structure**\n"
|
||||
" - Break down the tutorial into sections, stages, or lessons\n"
|
||||
" - Briefly describe what each section teaches or builds\n"
|
||||
" - Link to key files or folders associated with each part\n\n"
|
||||
"4. **Learning Outcomes**\n"
|
||||
" - Clearly list the skills or concepts users will have mastered by the end\n\n"
|
||||
"5. **Code Examples and Exercises**\n"
|
||||
" - Mention inline code snippets, checkpoints, or interactive examples\n"
|
||||
" - If exercises are included, describe how users should complete or test them\n\n"
|
||||
"6. **Project Structure**\n"
|
||||
" - Describe the layout of the repository and which files correspond to different tutorial stages\n\n"
|
||||
"7. **Next Steps / Further Reading**\n"
|
||||
" - Suggest where users can go after completing the tutorial\n"
|
||||
" - Include links to additional docs, libraries, or related tutorials\n\n"
|
||||
"8. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Use beginner-friendly language without dumbing things down\n"
|
||||
"- Include code blocks, links, and visual structure to aid readability\n"
|
||||
"- Help users stay oriented by reminding them what they've done and what's next"
|
||||
),
|
||||
"template": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which serves as a project starter or "
|
||||
"boilerplate template.\n\n"
|
||||
"Format the output using Markdown with clear section headers and developer-friendly formatting.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Project Overview**\n"
|
||||
" - Describe the purpose of this template and the type of projects it's meant for\n"
|
||||
" - List key features, tools, or configurations included by default\n\n"
|
||||
"2. **Getting Started**\n"
|
||||
" - Provide instructions for cloning or copying the template\n"
|
||||
" - Include setup steps: installing dependencies, environment config, and running locally\n\n"
|
||||
"3. **Customization Guide**\n"
|
||||
" - Explain which parts of the codebase are intended to be modified by users\n"
|
||||
" - Offer guidance on how to rename, rebrand, or restructure parts of the template\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Describe the layout of important directories and files\n"
|
||||
" - Highlight which files are meant for customization vs. boilerplate\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List the frameworks, libraries, and tools integrated into the template (e.g., ESLint, Prettier, "
|
||||
"Tailwind, Express)\n\n"
|
||||
"6. **Use Cases**\n"
|
||||
" - Provide example scenarios where this template is useful (e.g., 'Use this for building a REST API with "
|
||||
"authentication')\n"
|
||||
" - Link to live demos or projects built from this template if available\n\n"
|
||||
"7. **Contributing**\n"
|
||||
" - If the template is open to contributions, provide basic instructions for submitting improvements\n\n"
|
||||
"8. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Focus on helping users get started quickly and confidently\n"
|
||||
"- Use code blocks and examples to show how things work\n"
|
||||
"- Encourage best practices and provide defaults users can trust or extend"
|
||||
),
|
||||
"cli_tool": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a command-line "
|
||||
"interface (CLI) tool.\n\n"
|
||||
"Format the output using Markdown with clear section headers and include clear command-line examples.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Project Overview**\n"
|
||||
" - Explain what the CLI tool does and why it's useful\n"
|
||||
" - Mention common use cases or problems it solves\n\n"
|
||||
"2. **Installation**\n"
|
||||
" - Provide steps to install the tool (e.g., npm, pip, Homebrew, binary download)\n"
|
||||
" - Mention any required dependencies or environment setup\n\n"
|
||||
"3. **Usage**\n"
|
||||
" - Show how to use the tool from the command line\n"
|
||||
" - Include at least 2–3 example commands with explanations of the output\n"
|
||||
" - Demonstrate the most common and useful flags or options\n"
|
||||
" - If the tool supports subcommands, show examples of each\n\n"
|
||||
"4. **Command Reference**\n"
|
||||
" - List all available commands, flags, and options in a table or list format\n"
|
||||
" - Explain each option clearly, including defaults and accepted values\n\n"
|
||||
"5. **Configuration (if applicable)**\n"
|
||||
" - Describe any optional or required configuration files (e.g., `.clirc`, `config.json`)\n"
|
||||
" - Show example configurations and where to place them\n\n"
|
||||
"6. **Project Structure**\n"
|
||||
" - Briefly describe key files or folders related to the CLI's source code\n\n"
|
||||
"7. **Contributing**\n"
|
||||
" - Outline how to contribute, test changes, or add new commands\n\n"
|
||||
"8. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Use code blocks for command examples and outputs\n"
|
||||
"- Keep tone practical and clear, suitable for developers or power users\n"
|
||||
"- Focus on usability and real-world examples of the tool in action"
|
||||
),
|
||||
"framework": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a software framework "
|
||||
"designed to be extended or used as a foundation for building applications.\n\n"
|
||||
"Format the output using Markdown with clear section headers and structured, developer-friendly formatting.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Project Overview**\n"
|
||||
" - Describe what the framework does and the type of projects it's built for\n"
|
||||
" - Highlight key concepts and design philosophy (e.g., convention over configuration, modularity)\n\n"
|
||||
"2. **Getting Started**\n"
|
||||
" - Include steps for installing and initializing a new project using the framework\n"
|
||||
" - Provide a minimal working example with code blocks\n\n"
|
||||
"3. **Core Concepts**\n"
|
||||
" - Explain the main components or building blocks (e.g., modules, services, lifecycle, routing, etc.)\n"
|
||||
" - Include diagrams or conceptual overviews if helpful\n\n"
|
||||
"4. **Extension Points**\n"
|
||||
" - Describe how developers can extend the framework (e.g., plugins, middleware, custom components)\n"
|
||||
" - Include examples of common extension use cases\n\n"
|
||||
"5. **Project Structure**\n"
|
||||
" - Explain the directory layout of a typical project using the framework\n"
|
||||
" - Highlight where user code should live and where internal framework logic resides\n\n"
|
||||
"6. **Technologies Used**\n"
|
||||
" - List core dependencies, supported environments, or language-level features leveraged\n\n"
|
||||
"7. **Best Practices**\n"
|
||||
" - Offer guidance for structuring large projects, writing maintainable code, or following framework "
|
||||
"conventions\n\n"
|
||||
"8. **Contributing**\n"
|
||||
" - Outline how contributors can report issues, add features, or build plugins for the framework\n\n"
|
||||
"9. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Use clear examples and code snippets to explain key abstractions\n"
|
||||
"- Keep the tone empowering and oriented toward other developers building on top of the framework\n"
|
||||
"- Emphasize extensibility and conceptual clarity"
|
||||
),
|
||||
"data_science": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a data science or "
|
||||
"machine learning project.\n\n"
|
||||
"Format the output using Markdown with clear section headers and helpful formatting for technical readers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Project Overview**\n"
|
||||
" - Explain the goal of the project (e.g., prediction, classification, analysis)\n"
|
||||
" - Summarize key findings or outcomes (if applicable)\n\n"
|
||||
"2. **Dataset**\n"
|
||||
" - Describe the dataset used (source, size, structure)\n"
|
||||
" - Include schema information or link to external data sources\n"
|
||||
" - Mention whether the data is included in the repo or needs to be downloaded\n\n"
|
||||
"3. **Installation and Setup**\n"
|
||||
" - List dependencies and setup instructions (e.g., `requirements.txt`, `environment.yml`)\n"
|
||||
" - Mention any additional setup (e.g., downloading data, creating folders)\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Explain the layout of scripts, notebooks, data folders, and model outputs\n"
|
||||
" - Highlight the main entry points for running the pipeline\n\n"
|
||||
"5. **Model Architecture and Training**\n"
|
||||
" - Briefly describe the model(s) used and why they were chosen\n"
|
||||
" - Include training scripts and command-line instructions\n"
|
||||
" - Mention metrics used for evaluation\n\n"
|
||||
"6. **Evaluation and Results**\n"
|
||||
" - Summarize how the model was evaluated and key performance metrics\n"
|
||||
" - Optionally include plots, confusion matrices, or sample outputs\n\n"
|
||||
"7. **Inference / How to Use the Model**\n"
|
||||
" - Explain how to run inference or apply the model to new data\n"
|
||||
" - Include input/output formats and example commands or code\n\n"
|
||||
"8. **Technologies Used**\n"
|
||||
" - List key tools, libraries, and frameworks (e.g., scikit-learn, TensorFlow, pandas)\n\n"
|
||||
"9. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Use code blocks and examples where appropriate\n"
|
||||
"- Ensure reproducibility by including all necessary setup instructions\n"
|
||||
"- Keep the tone professional and geared toward data scientists or ML engineers"
|
||||
),
|
||||
"plugin": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a plugin or extension "
|
||||
"designed to integrate with a larger platform (such as a CMS, IDE, or framework).\n\n"
|
||||
"Format the output using Markdown with clear section headers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe what this plugin does and the host system it's built for\n"
|
||||
" - List key features and benefits\n\n"
|
||||
"2. **Installation**\n"
|
||||
" - Provide installation instructions specific to the host platform\n"
|
||||
" - Mention compatible versions and any prerequisites\n\n"
|
||||
"3. **Usage**\n"
|
||||
" - Show how to enable and configure the plugin\n"
|
||||
" - Include code snippets or configuration steps\n\n"
|
||||
"4. **Integration Points**\n"
|
||||
" - Describe hooks, lifecycle methods, or extension APIs the plugin interacts with\n\n"
|
||||
"5. **Project Structure**\n"
|
||||
" - Briefly explain key files and folders\n\n"
|
||||
"6. **Technologies Used**\n"
|
||||
" - List frameworks, languages, or tooling\n\n"
|
||||
"7. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"chrome_extension": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a Chrome extension "
|
||||
"project.\n\n"
|
||||
"Format the output using Markdown with clear section headers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe the purpose and features of the extension\n\n"
|
||||
"2. **Installation**\n"
|
||||
" - Include instructions for loading the extension in Chrome (via the Extensions page or Chrome Web Store)\n"
|
||||
" - Mention required permissions and how to review the manifest\n\n"
|
||||
"3. **Usage**\n"
|
||||
" - Explain how users interact with the extension (e.g., popup UI, context menu, background scripts)\n"
|
||||
" - Include example scenarios or screenshots if applicable\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Briefly describe key files like `manifest.json`, `background.js`, and popup components\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List libraries or frameworks (e.g., vanilla JS, React, Tailwind)\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"jupyter_notebook": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which consists of one or more "
|
||||
"Jupyter notebooks.\n\n"
|
||||
"Format the output using Markdown with clear section headers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe the purpose of the notebooks and what they demonstrate or analyze\n\n"
|
||||
"2. **Getting Started**\n"
|
||||
" - Provide instructions for setting up the environment (e.g., installing Jupyter, dependencies, "
|
||||
"virtualenv)\n"
|
||||
" - Mention how to launch the notebooks (e.g., `jupyter notebook` or `jupyter lab`)\n\n"
|
||||
"3. **Notebook Summary**\n"
|
||||
" - List and briefly describe each notebook in the repo\n"
|
||||
" - Mention whether they build on each other or are standalone\n\n"
|
||||
"4. **Dataset (if applicable)**\n"
|
||||
" - Describe any datasets used and where they come from\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List libraries (e.g., pandas, matplotlib, scikit-learn)\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"infrastructure": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which contains "
|
||||
"infrastructure-as-code or deployment configuration (e.g., Docker, Terraform, Ansible).\n\n"
|
||||
"Format the output using Markdown with clear section headers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Explain what infrastructure is being managed and its intended use\n\n"
|
||||
"2. **Setup**\n"
|
||||
" - Describe any prerequisites (e.g., installing Docker, Terraform CLI, cloud access credentials)\n"
|
||||
" - Include instructions for initializing and applying the configuration\n\n"
|
||||
"3. **Configuration Files**\n"
|
||||
" - Explain the structure and purpose of major files (e.g., `main.tf`, `docker-compose.yml`, "
|
||||
"`playbooks/`)\n\n"
|
||||
"4. **Deployment Workflow**\n"
|
||||
" - Describe how deployments are triggered and verified\n"
|
||||
" - Mention any CI/CD pipelines, remote state management, or secrets handling\n\n"
|
||||
"5. **Environments**\n"
|
||||
" - Clarify how to deploy to multiple environments (dev, staging, prod)\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"smart_contract": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which contains smart contracts "
|
||||
"written for a blockchain platform (e.g., Ethereum, Solana).\n\n"
|
||||
"Format the output using Markdown with clear section headers.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Explain the purpose and functionality of the smart contracts\n"
|
||||
" - Mention the target blockchain platform\n\n"
|
||||
"2. **Installation and Setup**\n"
|
||||
" - List dependencies and setup instructions (e.g., hardhat, anchor, solana-cli)\n"
|
||||
" - Include local devnet instructions if applicable\n\n"
|
||||
"3. **Contracts**\n"
|
||||
" - Describe the main contract(s) and what each one does\n"
|
||||
" - Include deployment steps and how to interact with them\n\n"
|
||||
"4. **Testing**\n"
|
||||
" - Explain how to run tests and what framework is used\n\n"
|
||||
"5. **Project Structure**\n"
|
||||
" - Describe layout of contracts, migrations, and test files\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"dapp": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a decentralized application "
|
||||
"(dApp) that includes both smart contract(s) and a web-based frontend.\n\n"
|
||||
"Format the output using Markdown with clear section headers and examples for both on-chain and off-chain "
|
||||
"components.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe what the dApp does and the blockchain ecosystem it runs on\n"
|
||||
" - Mention the smart contract platform (e.g., Ethereum, Solana, NEAR) and wallet compatibility\n\n"
|
||||
"2. **Architecture**\n"
|
||||
" - Provide a high-level diagram or explanation of how the frontend interacts with smart contracts\n"
|
||||
" - Mention key technologies used on both sides (e.g., React, Ethers.js, Anchor, Web3.js)\n\n"
|
||||
"3. **Getting Started**\n"
|
||||
" - Provide setup instructions for both frontend and backend\n"
|
||||
" - Include how to install dependencies, configure environment variables, and run locally\n\n"
|
||||
"4. **Smart Contracts**\n"
|
||||
" - Describe the deployed contracts and how to interact with them\n"
|
||||
" - Include deployment instructions and test commands\n\n"
|
||||
"5. **Frontend**\n"
|
||||
" - Describe key UI components and user flows (e.g., connect wallet, mint token, submit vote)\n"
|
||||
" - Mention any integrations with IPFS, oracles, or off-chain data\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"game": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a game or game engine "
|
||||
"project.\n\n"
|
||||
"Format the output using Markdown with clear section headers and provide clear instructions for playing and "
|
||||
"modifying the game.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe the game concept, genre, and platform (e.g., browser, desktop, mobile)\n"
|
||||
" - Mention gameplay goals or mechanics\n\n"
|
||||
"2. **Installation and Setup**\n"
|
||||
" - Provide instructions for installing dependencies and running the game\n"
|
||||
" - Include setup for game engines or SDKs (e.g., Unity, Godot, Phaser, Unreal)\n\n"
|
||||
"3. **Controls and Gameplay**\n"
|
||||
" - Explain player controls and core mechanics\n"
|
||||
" - Optionally include screenshots, video, or demo links\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Describe key files and folders (e.g., assets, levels, scripts)\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List engines, frameworks, or libraries used to build the game\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"desktop_app": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which is a desktop application "
|
||||
"project built with technologies like Electron, Tauri, Qt, or native frameworks.\n\n"
|
||||
"Format the output using Markdown with clear section headers and platform-aware instructions.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe what the desktop app does and who it's for\n"
|
||||
" - Mention platforms supported (e.g., Windows, macOS, Linux)\n\n"
|
||||
"2. **Installation and Setup**\n"
|
||||
" - Provide platform-specific install/build instructions\n"
|
||||
" - Include steps for running the app in development and building a production release\n\n"
|
||||
"3. **Usage**\n"
|
||||
" - Describe the app's main features and user workflows\n"
|
||||
" - Include screenshots if applicable\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Describe key files and folders (e.g., main process, renderer process, assets)\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List major libraries, frameworks, and build tools\n\n"
|
||||
"6. **License**\n"
|
||||
" - State the license type and link to the license file"
|
||||
),
|
||||
"dataset": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository, which contains a dataset for "
|
||||
"analysis, training, or research purposes.\n\n"
|
||||
"Format the output using Markdown with clear section headers and data-focused structure.\n\n"
|
||||
"Include the following sections:\n"
|
||||
"1. **Overview**\n"
|
||||
" - Describe what the dataset contains and its intended purpose\n"
|
||||
" - Mention the source and whether it was collected, generated, or aggregated\n\n"
|
||||
"2. **Dataset Details**\n"
|
||||
" - Describe the structure and format (e.g., CSV, JSON, images, text)\n"
|
||||
" - Include column definitions, schema, or data dictionaries\n"
|
||||
" - Mention the number of records, size, and any notable characteristics\n\n"
|
||||
"3. **Usage Instructions**\n"
|
||||
" - Provide example code snippets for loading and using the dataset (e.g., pandas, SQL, etc.)\n"
|
||||
" - Mention any preprocessing steps if needed\n\n"
|
||||
"4. **Licensing and Terms of Use**\n"
|
||||
" - State the license and any restrictions on usage or distribution\n"
|
||||
" - Include citation or attribution instructions if required\n\n"
|
||||
"5. **Related Work / Source Links**\n"
|
||||
" - Link to original data sources, research papers, or related projects (if applicable)"
|
||||
),
|
||||
"other": (
|
||||
"Please scan the repository and generate or update a complete and professional readme_prometheus.md file for this repository.\n\n"
|
||||
"{previous_review_comments_section}\n\n"
|
||||
"Analyze the contents of the repository to infer its intent, and format the README using Markdown with "
|
||||
"clear section headers.\n\n"
|
||||
"Include the following general sections, customizing them as needed based on the repository type:\n"
|
||||
"1. **Project Overview**\n"
|
||||
" - Describe the purpose of the project and its main functionality\n"
|
||||
" - Summarize what the project does and who it's for\n\n"
|
||||
"2. **Getting Started**\n"
|
||||
" - Include setup or usage instructions based on the repo's structure\n"
|
||||
" - Mention installation steps, dependencies, and commands to run or use the project\n\n"
|
||||
"3. **Features / Capabilities**\n"
|
||||
" - List the core features or components of the project\n"
|
||||
" - Include relevant examples, demos, or configurations if applicable\n\n"
|
||||
"4. **Project Structure**\n"
|
||||
" - Describe the layout of files and folders, especially any key scripts, configs, or assets\n\n"
|
||||
"5. **Technologies Used**\n"
|
||||
" - List any major frameworks, libraries, or languages identified in the project\n\n"
|
||||
"6. **Usage Examples** (if applicable)\n"
|
||||
" - Include example commands or steps showing how to use the project\n\n"
|
||||
"7. **License**\n"
|
||||
" - State the license type and link to the license file\n\n"
|
||||
"Additional Notes:\n"
|
||||
"- Focus on making the README useful and descriptive, even if the project type is ambiguous\n"
|
||||
"- Use best judgment to tailor the content to the actual functionality and audience of the project\n"
|
||||
"- Avoid placeholder text and strive to extract real, useful information from the codebase"
|
||||
),
|
||||
"review_readme_file": (
|
||||
"Review the readme_prometheus.md file in the repository and evaluate its quality and relevance to the repository.\n\n"
|
||||
"Please analyze:\n"
|
||||
"1. Is the readme_prometheus.md file related to this specific repository? (Does it describe the actual code and purpose of this repo?)\n"
|
||||
"2. Does it correctly explain the repository's purpose, features, and functionality?\n"
|
||||
"3. Is it comprehensive enough to help users understand and use the repository?\n"
|
||||
"4. Does it follow best practices for README documentation?\n\n"
|
||||
"Use the validate_implementation tool to submit your findings.\n"
|
||||
"STOP after submitting the review report."
|
||||
),
|
||||
"previous_review_comments": (
|
||||
"Here are the comments from the previous review:\n"
|
||||
),
|
||||
}
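A minimal sketch of how these templates are presumably rendered (the actual substitution happens inside the workflow phases, which are not shown here; the reviewer comment below is hypothetical):

# Illustrative only: assumes templates are filled with str.format from the workflow context.
comments_section = PROMPTS["previous_review_comments"] + "The README omits the License section."
prompt = PROMPTS["other"].format(previous_review_comments_section=comments_section)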
|
||||
|
||||
|
||||
|
||||
|
277
worker/orca-agent/src/workflows/repoSummarizer/workflow.py
Normal file
@ -0,0 +1,277 @@
|
||||
"""Task decomposition workflow implementation."""
|
||||
|
||||
import os
|
||||
from github import Github
|
||||
from prometheus_swarm.workflows.base import Workflow
|
||||
from prometheus_swarm.utils.logging import log_section, log_key_value, log_error
|
||||
from src.workflows.repoSummarizer import phases
|
||||
from prometheus_swarm.workflows.utils import (
|
||||
check_required_env_vars,
|
||||
cleanup_repository,
|
||||
validate_github_auth,
|
||||
setup_repository
|
||||
)
|
||||
from src.workflows.repoSummarizer.prompts import PROMPTS
|
||||
|
||||
|
||||
class Task:
|
||||
def __init__(self, title: str, description: str, acceptance_criteria: list[str]):
|
||||
self.title = title
|
||||
self.description = description
|
||||
self.acceptance_criteria = acceptance_criteria
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert task to dictionary format."""
|
||||
return {
|
||||
"title": self.title,
|
||||
"description": self.description,
|
||||
"acceptance_criteria": self.acceptance_criteria,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "Task":
|
||||
"""Create task from dictionary."""
|
||||
return cls(
|
||||
title=data["title"],
|
||||
description=data["description"],
|
||||
acceptance_criteria=data["acceptance_criteria"],
|
||||
)
|
||||
|
||||
|
||||
class RepoSummarizerWorkflow(Workflow):
|
||||
def __init__(
|
||||
self,
|
||||
client,
|
||||
prompts,
|
||||
repo_url,
|
||||
):
|
||||
# Extract owner and repo name from URL
|
||||
# URL format: https://github.com/owner/repo
|
||||
parts = repo_url.strip("/").split("/")
|
||||
repo_owner = parts[-2]
|
||||
repo_name = parts[-1]
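# e.g. "https://github.com/owner/repo" -> repo_owner "owner", repo_name "repo"
# (hypothetical URL, shown only to illustrate the indexing)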
|
||||
|
||||
super().__init__(
|
||||
client=client,
|
||||
prompts=prompts,
|
||||
repo_url=repo_url,
|
||||
repo_owner=repo_owner,
|
||||
repo_name=repo_name,
|
||||
)
|
||||
|
||||
def setup(self):
|
||||
"""Set up repository and workspace."""
|
||||
check_required_env_vars(["GITHUB_TOKEN", "GITHUB_USERNAME"])
|
||||
validate_github_auth(os.getenv("GITHUB_TOKEN"), os.getenv("GITHUB_USERNAME"))
|
||||
|
||||
# Get the default branch from GitHub
|
||||
try:
|
||||
gh = Github(os.getenv("GITHUB_TOKEN"))
|
||||
self.context["repo_full_name"] = (
|
||||
f"{self.context['repo_owner']}/{self.context['repo_name']}"
|
||||
)
|
||||
|
||||
repo = gh.get_repo(
|
||||
f"{self.context['repo_owner']}/{self.context['repo_name']}"
|
||||
)
|
||||
self.context["base"] = repo.default_branch
|
||||
log_key_value("Default branch", self.context["base"])
|
||||
except Exception as e:
|
||||
log_error(e, "Failed to get default branch, using 'main'")
|
||||
self.context["base"] = "main"
|
||||
|
||||
# Set up repository directory
|
||||
setup_result = setup_repository(self.context["repo_url"], github_token=os.getenv("GITHUB_TOKEN"), github_username=os.getenv("GITHUB_USERNAME"))
|
||||
if not setup_result["success"]:
|
||||
raise Exception(f"Failed to set up repository: {setup_result['message']}")
|
||||
self.context["github_token"] = os.getenv("GITHUB_TOKEN")
|
||||
self.context["repo_path"] = setup_result["data"]["clone_path"]
|
||||
self.original_dir = setup_result["data"]["original_dir"]
|
||||
self.context["fork_url"] = setup_result["data"]["fork_url"]
|
||||
self.context["fork_owner"] = setup_result["data"]["fork_owner"]
|
||||
self.context["fork_name"] = setup_result["data"]["fork_name"]
|
||||
|
||||
# Enter repo directory
|
||||
os.chdir(self.context["repo_path"])
|
||||
|
||||
# Configure Git user info
|
||||
# setup_git_user_config(self.context["repo_path"])
|
||||
|
||||
# Get current files for context
|
||||
|
||||
def cleanup(self):
|
||||
"""Cleanup workspace."""
|
||||
# Make sure we're not in the repo directory before cleaning up
|
||||
if os.getcwd() == self.context.get("repo_path", ""):
|
||||
os.chdir(self.original_dir)
|
||||
|
||||
# Clean up the repository directory
|
||||
cleanup_repository(self.original_dir, self.context.get("repo_path", ""))
|
||||
# Clean up the MongoDB
|
||||
|
||||
def run(self):
|
||||
self.setup()
|
||||
|
||||
# Create a feature branch
|
||||
log_section("CREATING FEATURE BRANCH")
|
||||
branch_phase = phases.BranchCreationPhase(workflow=self)
|
||||
branch_result = branch_phase.execute()
|
||||
|
||||
if not branch_result or not branch_result.get("success"):
|
||||
log_error(Exception("Branch creation failed"), "Branch creation failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Branch creation failed",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
# Store branch name in context
|
||||
self.context["head"] = branch_result["data"]["branch_name"]
|
||||
log_key_value("Branch created", self.context["head"])
|
||||
|
||||
# Classify repository
|
||||
repo_classification_result = self.classify_repository()
|
||||
if not repo_classification_result or not repo_classification_result.get(
|
||||
"success"
|
||||
):
|
||||
log_error(
|
||||
Exception("Repository classification failed"),
|
||||
"Repository classification failed",
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Repository classification failed",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
# Get prompt name for README generation
|
||||
prompt_name = repo_classification_result["data"].get("prompt_name")
|
||||
if not prompt_name:
|
||||
log_error(
|
||||
Exception("No prompt name returned from repository classification"),
|
||||
"Repository classification failed to provide prompt name",
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Repository classification failed to provide prompt name",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
# Generate README file
|
||||
for i in range(3):
|
||||
if i > 0:
|
||||
prompt_name = "other"
|
||||
readme_result = self.generate_readme_file(prompt_name)
|
||||
if not readme_result or not readme_result.get("success"):
|
||||
log_error(Exception("README generation failed"), "README generation failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "README generation failed",
|
||||
"data": None,
|
||||
}
|
||||
if readme_result.get("success"):
|
||||
review_result = self.review_readme_file(readme_result)
|
||||
if not review_result or not review_result.get("success"):
|
||||
log_error(Exception("README review failed"), "README review failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": "README review failed",
|
||||
"data": None,
|
||||
}
|
||||
log_key_value("README review result", review_result.get("data"))
|
||||
if review_result.get("success") and review_result.get("data").get("recommendation") == "APPROVE":
|
||||
result = self.create_pull_request()
|
||||
return result
|
||||
else:
|
||||
self.context["previous_review_comments_section"] = PROMPTS["previous_review_comments"] + review_result.get("data").get("comment")
|
||||
|
||||
|
||||
|
||||
|
||||
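# Note: attempt 1 uses the classified prompt; attempts 2 and 3 fall back to the
# generic "other" prompt, carrying the reviewer's feedback forward via
# previous_review_comments_section. Reaching this return means no attempt was approved.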
return {
|
||||
"success": False,
|
||||
"message": "README Review Exceed Max Attempts",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
def classify_repository(self):
|
||||
try:
|
||||
log_section("CLASSIFYING REPOSITORY TYPE")
|
||||
repo_classification_phase = phases.RepoClassificationPhase(workflow=self)
|
||||
return repo_classification_phase.execute()
|
||||
except Exception as e:
|
||||
log_error(e, "Repository classification workflow failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Repository classification workflow failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
def review_readme_file(self, readme_result):
|
||||
"""Execute the issue generation workflow."""
|
||||
try:
|
||||
log_section("REVIEWING README FILE")
|
||||
review_readme_file_phase = phases.ReadmeReviewPhase(workflow=self)
|
||||
return review_readme_file_phase.execute()
|
||||
except Exception as e:
|
||||
log_error(e, "Readme file review workflow failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Readme file review workflow failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
def generate_readme_file(self, prompt_name):
|
||||
"""Execute the issue generation workflow."""
|
||||
try:
|
||||
|
||||
# ==================== Generate README file ====================
|
||||
log_section("GENERATING README FILE")
|
||||
generate_readme_file_phase = phases.ReadmeGenerationPhase(
|
||||
workflow=self, prompt_name=prompt_name
|
||||
)
|
||||
readme_result = generate_readme_file_phase.execute()
|
||||
|
||||
# Check README Generation Result
|
||||
if not readme_result or not readme_result.get("success"):
|
||||
log_error(
|
||||
Exception((readme_result or {}).get("error", "No result")),
|
||||
"Readme file generation failed",
|
||||
)
|
||||
return None
|
||||
|
||||
return readme_result
|
||||
|
||||
except Exception as e:
|
||||
log_error(e, "Readme file generation workflow failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Readme file generation workflow failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
||||
|
||||
def create_pull_request(self):
|
||||
"""Create a pull request for the README file."""
|
||||
try:
|
||||
log_section("CREATING PULL REQUEST")
|
||||
|
||||
# Add required PR title and description parameters to context
|
||||
self.context["title"] = f"Prometheus: Add README for {self.context['repo_name']}"
|
||||
self.context["description"] = (
|
||||
f"This PR adds a README file for the {self.context['repo_name']} repository."
|
||||
)
|
||||
|
||||
log_key_value(
|
||||
"Creating PR",
|
||||
f"from {self.context['head']} to {self.context['base']}",
|
||||
)
|
||||
|
||||
print("CONTEXT", self.context)
|
||||
create_pull_request_phase = phases.CreatePullRequestPhase(workflow=self)
|
||||
return create_pull_request_phase.execute()
|
||||
except Exception as e:
|
||||
log_error(e, "Pull request creation workflow failed")
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Pull request creation workflow failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
52
worker/orca-agent/src/workflows/repoSummarizerAudit/__main__.py
Normal file
@ -0,0 +1,52 @@
|
||||
"""Entry point for the todo creator workflow."""
|
||||
|
||||
import sys
|
||||
import argparse
|
||||
from dotenv import load_dotenv
|
||||
from src.workflows.repoSummarizerAudit.workflow import repoSummarizerAuditWorkflow
|
||||
from src.workflows.repoSummarizerAudit.prompts import PROMPTS
|
||||
from prometheus_swarm.clients import setup_client
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the todo creator workflow."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Create tasks from a feature specification for a GitHub repository"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--pr-url",
|
||||
type=str,
|
||||
required=True,
|
||||
help="GitHub pull request URL (e.g., https://github.com/owner/repo/pull/1)",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
type=str,
|
||||
default="anthropic",
|
||||
choices=["anthropic", "openai", "xai"],
|
||||
help="Model provider to use (default: anthropic)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Initialize client
|
||||
client = setup_client(args.model)
|
||||
|
||||
# Run the repo summarizer audit workflow
|
||||
workflow = repoSummarizerAuditWorkflow(
|
||||
client=client,
|
||||
prompts=PROMPTS,
|
||||
pr_url=args.pr_url,
|
||||
)
|
||||
|
||||
result = workflow.run()
|
||||
if not result or not result.get("success"):
|
||||
print("Todo creator workflow failed")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
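# Example invocation (hypothetical PR URL; assumes running from worker/orca-agent):
#   python -m src.workflows.repoSummarizerAudit --pr-url https://github.com/owner/repo/pull/1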
|
15
worker/orca-agent/src/workflows/repoSummarizerAudit/phases.py
Normal file
@ -0,0 +1,15 @@
|
||||
"""Task decomposition workflow phases implementation."""
|
||||
|
||||
from prometheus_swarm.workflows.base import WorkflowPhase, Workflow
|
||||
|
||||
|
||||
class CheckReadmeFilePhase(WorkflowPhase):
|
||||
def __init__(self, workflow: Workflow, conversation_id: str = None):
|
||||
super().__init__(
|
||||
workflow=workflow,
|
||||
prompt_name="check_readme_file",
|
||||
available_tools=["read_file", "list_files", "review_pull_request_legacy"],
|
||||
conversation_id=conversation_id,
|
||||
name="Check Readme File",
|
||||
)
|
||||
|
29
worker/orca-agent/src/workflows/repoSummarizerAudit/prompts.py
Normal file
@ -0,0 +1,29 @@
|
||||
"""Prompts for the repository summarization workflow."""
|
||||
|
||||
PROMPTS = {
|
||||
"system_prompt": (
|
||||
"You are an expert software architect and technical lead specializing in summarizing "
|
||||
"repositories into comprehensive documentation. You excel at analyzing codebases "
|
||||
"and creating clear, structured documentation."
|
||||
),
|
||||
|
||||
"check_readme_file": (
|
||||
"A pull request has been checked out for you. The repository is {repo_owner}/{repo_name} and "
|
||||
"the PR number is {pr_number}. The following files are available:\n"
|
||||
"{current_files}\n\n"
|
||||
"The criteria for the README file are:\n"
|
||||
"1. Project Overview\n"
|
||||
" - Purpose and main functionality\n"
|
||||
" - Key features\n"
|
||||
"2. Repository Structure\n"
|
||||
" - Detailed breakdown of directories and their purposes\n"
|
||||
" - Key files and their roles\n"
|
||||
"3. Technical Details\n"
|
||||
" - Technologies used\n"
|
||||
" - Architecture overview\n"
|
||||
"4. File Contents\n"
|
||||
" - Specific description of each significant file\n\n"
|
||||
"Please review the README file and give feedback.\n"
|
||||
),
|
||||
|
||||
}
|
163
worker/orca-agent/src/workflows/repoSummarizerAudit/workflow.py
Normal file
@ -0,0 +1,163 @@
|
||||
"""Task decomposition workflow implementation."""
|
||||
|
||||
import os
|
||||
from github import Github
|
||||
from prometheus_swarm.workflows.base import Workflow
|
||||
from prometheus_swarm.utils.logging import log_section, log_key_value, log_error
|
||||
from src.workflows.repoSummarizerAudit import phases
|
||||
from prometheus_swarm.workflows.utils import (
|
||||
check_required_env_vars,
|
||||
validate_github_auth,
|
||||
setup_repository,
|
||||
cleanup_repository,
|
||||
get_current_files,
|
||||
)
|
||||
|
||||
|
||||
|
||||
class Task:
|
||||
def __init__(self, title: str, description: str, acceptance_criteria: list[str]):
|
||||
self.title = title
|
||||
self.description = description
|
||||
self.acceptance_criteria = acceptance_criteria
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert task to dictionary format."""
|
||||
return {
|
||||
"title": self.title,
|
||||
"description": self.description,
|
||||
"acceptance_criteria": self.acceptance_criteria,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "Task":
|
||||
"""Create task from dictionary."""
|
||||
return cls(
|
||||
title=data["title"],
|
||||
description=data["description"],
|
||||
acceptance_criteria=data["acceptance_criteria"],
|
||||
)
|
||||
|
||||
|
||||
class repoSummarizerAuditWorkflow(Workflow):
|
||||
def __init__(
|
||||
self,
|
||||
client,
|
||||
prompts,
|
||||
pr_url,
|
||||
):
|
||||
# Extract owner and repo name from URL
|
||||
# URL format: https://github.com/owner/repo
|
||||
parts = pr_url.strip("/").split("/")
|
||||
repo_owner = parts[-4]
|
||||
repo_name = parts[-3]
|
||||
pr_number = int(parts[-1]) # Convert to integer
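# e.g. "https://github.com/owner/repo/pull/1" -> repo_owner "owner", repo_name "repo", pr_number 1
# (hypothetical URL, shown only to illustrate the indexing)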
|
||||
super().__init__(
|
||||
client=client,
|
||||
prompts=prompts,
|
||||
repo_owner=repo_owner,
|
||||
repo_name=repo_name,
|
||||
pr_number=pr_number,
|
||||
)
|
||||
self.context["pr_number"] = pr_number
|
||||
self.context["pr_url"] = pr_url
|
||||
self.context["repo_owner"] = repo_owner
|
||||
self.context["repo_name"] = repo_name
|
||||
self.context["repo_full_name"] = f"{repo_owner}/{repo_name}"
|
||||
|
||||
def setup(self):
|
||||
"""Set up repository and workspace."""
|
||||
# Check required environment variables and validate GitHub auth
|
||||
check_required_env_vars(["GITHUB_TOKEN", "GITHUB_USERNAME"])
|
||||
validate_github_auth(os.getenv("GITHUB_TOKEN"), os.getenv("GITHUB_USERNAME"))
|
||||
self.context["repo_url"] = f"https://github.com/{self.context['repo_owner']}/{self.context['repo_name']}"
|
||||
# Set up repository directory
|
||||
setup_result = setup_repository(self.context["repo_url"], github_token=os.getenv("GITHUB_TOKEN"), github_username=os.getenv("GITHUB_USERNAME"))
|
||||
if not setup_result["success"]:
|
||||
raise Exception(f"Failed to set up repository: {setup_result['message']}")
|
||||
|
||||
self.context["repo_path"] = setup_result["data"]["clone_path"]
|
||||
self.original_dir = setup_result["data"]["original_dir"]
|
||||
self.context["fork_url"] = setup_result["data"]["fork_url"]
|
||||
self.context["fork_owner"] = setup_result["data"]["fork_owner"]
|
||||
self.context["fork_name"] = setup_result["data"]["fork_name"]
|
||||
self.context["github_token"] = os.getenv("GITHUB_TOKEN")
|
||||
# Enter repo directory
|
||||
os.chdir(self.context["repo_path"])
|
||||
gh = Github(self.context["github_token"])
|
||||
repo = gh.get_repo(
|
||||
f"{self.context['repo_owner']}/{self.context['repo_name']}"
|
||||
)
|
||||
pr = repo.get_pull(self.context["pr_number"])
|
||||
self.context["pr"] = pr
|
||||
# Add remote for PR's repository and fetch the branch
|
||||
os.system(
|
||||
f"git remote add pr_source https://github.com/{pr.head.repo.full_name}"
|
||||
)
|
||||
os.system(f"git fetch pr_source {pr.head.ref}")
|
||||
os.system("git checkout FETCH_HEAD")
|
||||
|
||||
# Get current files for context
|
||||
self.context["current_files"] = get_current_files()
|
||||
|
||||
def cleanup(self):
|
||||
"""Cleanup workspace."""
|
||||
# Make sure we're not in the repo directory before cleaning up
|
||||
if os.getcwd() == self.context.get("repo_path", ""):
|
||||
os.chdir(self.original_dir)
|
||||
|
||||
# Clean up the repository directory
|
||||
cleanup_repository(self.original_dir, self.context.get("repo_path", ""))
|
||||
# Clean up the MongoDB
|
||||
|
||||
def run(self):
|
||||
check_readme_file_result = self.check_readme_file()
|
||||
|
||||
return check_readme_file_result
|
||||
|
||||
def check_readme_file(self):
|
||||
"""Execute the issue generation workflow."""
|
||||
try:
|
||||
self.setup()
|
||||
# ==================== Check README file ====================
|
||||
check_readme_file_phase = phases.CheckReadmeFilePhase(workflow=self)
|
||||
check_readme_file_result = check_readme_file_phase.execute()
|
||||
# Check README review result
|
||||
if not check_readme_file_result or not check_readme_file_result.get(
|
||||
"success"
|
||||
):
|
||||
log_error(
|
||||
Exception((check_readme_file_result or {}).get("error", "No result")),
|
||||
"Readme file check failed",
|
||||
)
|
||||
return {
|
||||
"success": False,
|
||||
"message": "Readme file check failed",
|
||||
"data": {
|
||||
"recommendation": False,
|
||||
},
|
||||
}
|
||||
log_section("Readme file check completed")
|
||||
print(check_readme_file_result)
|
||||
recommendation = check_readme_file_result["data"]["recommendation"]
|
||||
log_key_value(
|
||||
"Readme file check completed", f"Recommendation: {recommendation}"
|
||||
)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"message": "Readme file check completed",
|
||||
"data": {
|
||||
"recommendation": recommendation == "APPROVE",
|
||||
},
|
||||
}
|
||||
except Exception as e:
|
||||
log_error(e, "Readme file check workflow failed")
|
||||
print(e)
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Readme file check workflow failed: {str(e)}",
|
||||
"data": {
|
||||
"recommendation": False,
|
||||
},
|
||||
}
|
57
worker/orca-agent/src/workflows/starRepo/__main__.py
Normal file
@ -0,0 +1,57 @@
|
||||
"""Entry point for the todo creator workflow."""
|
||||
|
||||
import sys
|
||||
|
||||
import argparse
|
||||
from dotenv import load_dotenv
|
||||
from src.workflows.starRepo.workflow import StarRepoWorkflow
|
||||
from src.workflows.starRepo.prompts import PROMPTS
|
||||
from prometheus_swarm.clients import setup_client
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
|
||||
def main():
|
||||
"""Run the todo creator workflow."""
|
||||
parser = argparse.ArgumentParser(
|
||||
description="Create tasks from a feature specification for a GitHub repository"
|
||||
)
|
||||
parser.add_argument(
|
||||
"--repo",
|
||||
type=str,
|
||||
required=True,
|
||||
help="GitHub repository URL (e.g., https://github.com/owner/repo)",
|
||||
)
|
||||
|
||||
parser.add_argument(
|
||||
"--model",
|
||||
type=str,
|
||||
default="anthropic",
|
||||
choices=["anthropic", "openai", "xai"],
|
||||
help="Model provider to use (default: anthropic)",
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
# Initialize client
|
||||
client = setup_client(args.model)
|
||||
|
||||
# Run the star repo workflow
|
||||
workflow = StarRepoWorkflow(
|
||||
client=client,
|
||||
prompts=PROMPTS,
|
||||
repo_url=args.repo,
|
||||
)
|
||||
|
||||
|
||||
result = workflow.run()
|
||||
if not result or not result.get("success"):
|
||||
print("Todo creator workflow failed")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
15
worker/orca-agent/src/workflows/starRepo/phases.py
Normal file
@ -0,0 +1,15 @@
|
||||
"""Task decomposition workflow phases implementation."""
|
||||
|
||||
from prometheus_swarm.workflows.base import WorkflowPhase, Workflow
|
||||
|
||||
|
||||
class ReadmeGenerationPhase(WorkflowPhase):
|
||||
def __init__(self, workflow: Workflow, conversation_id: str = None):
|
||||
super().__init__(
|
||||
workflow=workflow,
|
||||
prompt_name="generate_readme_file",
|
||||
available_tools=["read_file", "write_file", "list_files", "commit_and_push"],
|
||||
conversation_id=conversation_id,
|
||||
name="Readme Generation",
|
||||
)
|
||||
|
29
worker/orca-agent/src/workflows/starRepo/prompts.py
Normal file
@ -0,0 +1,29 @@
|
||||
"""Prompts for the repository summarization workflow."""
|
||||
|
||||
PROMPTS = {
|
||||
"system_prompt": (
|
||||
"You are an expert software architect and technical lead specializing in summarizing "
|
||||
"repositories into comprehensive documentation. You excel at analyzing codebases "
|
||||
"and creating clear, structured documentation."
|
||||
),
|
||||
|
||||
"generate_readme_file": (
|
||||
"Generate a comprehensive README file for the following repository:\n"
|
||||
"Repository: {repo_url}\n\n"
|
||||
"Please include:\n"
|
||||
"1. Project Overview\n"
|
||||
" - Purpose and main functionality\n"
|
||||
" - Key features\n"
|
||||
"2. Repository Structure\n"
|
||||
" - Detailed breakdown of directories and their purposes\n"
|
||||
" - Key files and their roles\n"
|
||||
"3. Technical Details\n"
|
||||
" - Technologies used\n"
|
||||
" - Architecture overview\n"
|
||||
"4. File Contents\n"
|
||||
" - Specific description of each significant file\n\n"
|
||||
"Format the output in markdown, ensuring clear section headers and proper formatting."
|
||||
"Please commit and push the changes to the repository after generating the README file."
|
||||
),
|
||||
|
||||
}
|
141
worker/orca-agent/src/workflows/starRepo/workflow.py
Normal file
@ -0,0 +1,141 @@
|
||||
"""Task decomposition workflow implementation."""
|
||||
|
||||
import os
|
||||
from github import Github
|
||||
from prometheus_swarm.workflows.base import Workflow
|
||||
from prometheus_swarm.tools.github_operations.implementations import star_repository
|
||||
from prometheus_swarm.utils.logging import log_section, log_key_value, log_error
|
||||
|
||||
from prometheus_swarm.workflows.utils import (
|
||||
check_required_env_vars,
|
||||
validate_github_auth,
|
||||
)
|
||||
|
||||
|
||||
class Task:
|
||||
def __init__(self, title: str, description: str, acceptance_criteria: list[str]):
|
||||
self.title = title
|
||||
self.description = description
|
||||
self.acceptance_criteria = acceptance_criteria
|
||||
|
||||
def to_dict(self) -> dict:
|
||||
"""Convert task to dictionary format."""
|
||||
return {
|
||||
"title": self.title,
|
||||
"description": self.description,
|
||||
"acceptance_criteria": self.acceptance_criteria,
|
||||
}
|
||||
|
||||
@classmethod
|
||||
def from_dict(cls, data: dict) -> "Task":
|
||||
"""Create task from dictionary."""
|
||||
return cls(
|
||||
title=data["title"],
|
||||
description=data["description"],
|
||||
acceptance_criteria=data["acceptance_criteria"],
|
||||
)
|
||||
|
||||
|
||||
class StarRepoWorkflow(Workflow):
|
||||
def __init__(
|
||||
self,
|
||||
client,
|
||||
prompts,
|
||||
repo_url,
|
||||
):
|
||||
# Extract owner and repo name from URL
|
||||
# URL format: https://github.com/owner/repo
|
||||
parts = repo_url.strip("/").split("/")
|
||||
repo_owner = parts[-2]
|
||||
repo_name = parts[-1]
|
||||
|
||||
super().__init__(
|
||||
client=client,
|
||||
prompts=prompts,
|
||||
repo_url=repo_url,
|
||||
repo_owner=repo_owner,
|
||||
repo_name=repo_name,
|
||||
)
|
||||
self.context["repo_owner"] = repo_owner
|
||||
self.context["repo_name"] = repo_name
|
||||
self.context["github_token"] = os.getenv("GITHUB_TOKEN")
|
||||
|
||||
def setup(self):
|
||||
"""Set up repository and workspace."""
|
||||
check_required_env_vars(["GITHUB_TOKEN", "GITHUB_USERNAME"])
|
||||
validate_github_auth(os.getenv("GITHUB_TOKEN"), os.getenv("GITHUB_USERNAME"))
|
||||
|
||||
# # Get the default branch from GitHub
|
||||
# try:
|
||||
# gh = Github(os.getenv("GITHUB_TOKEN"))
|
||||
# repo = gh.get_repo(
|
||||
# f"{self.context['repo_owner']}/{self.context['repo_name']}"
|
||||
# )
|
||||
# self.context["base_branch"] = repo.default_branch
|
||||
# log_key_value("Default branch", self.context["base_branch"])
|
||||
# except Exception as e:
|
||||
# log_error(e, "Failed to get default branch, using 'main'")
|
||||
# self.context["base_branch"] = "main"
|
||||
|
||||
# Set up repository directory
|
||||
# repo_path, original_dir = setup_repo_directory()
|
||||
# self.context["repo_path"] = repo_path
|
||||
# self.original_dir = original_dir
|
||||
|
||||
# # Fork and clone repository
|
||||
# log_section("FORKING AND CLONING REPOSITORY")
|
||||
# fork_result = fork_repository(
|
||||
# f"{self.context['repo_owner']}/{self.context['repo_name']}",
|
||||
# self.context["repo_path"],
|
||||
# )
|
||||
# if not fork_result["success"]:
|
||||
# error = fork_result.get("error", "Unknown error")
|
||||
# log_error(Exception(error), "Fork failed")
|
||||
# raise Exception(error)
|
||||
|
||||
# # Enter repo directory
|
||||
# os.chdir(self.context["repo_path"])
|
||||
|
||||
# # Configure Git user info
|
||||
# setup_git_user_config(self.context["repo_path"])
|
||||
|
||||
# Get current files for context
|
||||
|
||||
def cleanup(self):
|
||||
"""Cleanup workspace."""
|
||||
# cleanup_repository(self.original_dir, self.context.get("repo_path", ""))
|
||||
# Make sure we're not in the repo directory before cleaning up
|
||||
# if os.getcwd() == self.context.get("repo_path", ""):
|
||||
# os.chdir(self.original_dir)
|
||||
|
||||
# # Clean up the repository directory
|
||||
# cleanup_repo_directory(self.original_dir, self.context.get("repo_path", ""))
|
||||
# Clean up the MongoDB
|
||||
|
||||
def run(self):
|
||||
star_repo_result = self.start_star_repo()
|
||||
return star_repo_result
|
||||
|
||||
def start_star_repo(self):
|
||||
"""Execute the issue generation workflow."""
|
||||
try:
|
||||
self.setup()
|
||||
# ==================== Generate issues ====================
|
||||
star_repo_result = star_repository(
|
||||
self.context["repo_owner"], self.context["repo_name"], self.context["github_token"]
|
||||
)
|
||||
if not star_repo_result or not star_repo_result.get("success"):
|
||||
log_error(
|
||||
Exception(star_repo_result.get("error", "No result")),
|
||||
"Repository star failed",
|
||||
)
|
||||
return None
|
||||
return star_repo_result
|
||||
except Exception as e:
|
||||
log_error(e, "Readme file generation workflow failed")
|
||||
print(e)
|
||||
return {
|
||||
"success": False,
|
||||
"message": f"Readme file generation workflow failed: {str(e)}",
|
||||
"data": None,
|
||||
}
|
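# A minimal driver sketch (assumed, mirroring the __main__ pattern used by the
# sibling workflows in this repo; the prompts module path is an assumption):
#
#     from prometheus_swarm.clients import setup_client
#     from src.workflows.starRepo.workflow import StarRepoWorkflow
#     from src.workflows.starRepo.prompts import PROMPTS  # assumed module path
#
#     client = setup_client("anthropic")
#     workflow = StarRepoWorkflow(
#         client=client, prompts=PROMPTS, repo_url="https://github.com/owner/repo"
#     )
#     result = workflow.run()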
58
worker/orca-agent/src/workflows/starRepoAudit/__main__.py
Normal file
@ -0,0 +1,58 @@
"""Entry point for the star repo audit workflow."""

import sys
import os
import argparse
from dotenv import load_dotenv
from src.workflows.starRepoAudit.workflow import StarRepoAuditWorkflow
from src.workflows.starRepoAudit.prompts import PROMPTS
from prometheus_swarm.clients import setup_client

# Load environment variables
load_dotenv()


def main():
    """Run the star repo audit workflow."""
    parser = argparse.ArgumentParser(
        description="Check that a GitHub repository has been starred"
    )
    parser.add_argument(
        "--repo",
        type=str,
        required=True,
        help="GitHub repository URL (e.g., https://github.com/owner/repo)",
    )
    parser.add_argument(
        "--model",
        type=str,
        default="anthropic",
        choices=["anthropic", "openai", "xai"],
        help="Model provider to use (default: anthropic)",
    )
    args = parser.parse_args()

    # Initialize client
    client = setup_client(args.model)

    # Run the star repo audit workflow
    workflow = StarRepoAuditWorkflow(
        client=client,
        prompts=PROMPTS,
        repo_url=args.repo,
        github_username="HermanL02",
    )

    result = workflow.run()
    if not result or not result.get("success"):
        print("Star repo audit workflow failed")
        sys.exit(1)


if __name__ == "__main__":
    main()
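# Example invocation (assumed, given the package layout above):
#
#     python -m src.workflows.starRepoAudit --repo https://github.com/owner/repo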
15
worker/orca-agent/src/workflows/starRepoAudit/phases.py
Normal file
@ -0,0 +1,15 @@
"""Star repo audit workflow phases implementation."""

from src.workflows.base import WorkflowPhase, Workflow


class ReadmeGenerationPhase(WorkflowPhase):
    def __init__(self, workflow: Workflow, conversation_id: str = None):
        super().__init__(
            workflow=workflow,
            prompt_name="generate_readme_file",
            available_tools=["read_file", "write_file", "list_files", "commit_and_push"],
            conversation_id=conversation_id,
            name="Readme Generation",
        )
29
worker/orca-agent/src/workflows/starRepoAudit/prompts.py
Normal file
@ -0,0 +1,29 @@
"""Prompts for the star repo audit workflow."""

PROMPTS = {
    "system_prompt": (
        "You are an expert software architect and technical lead specializing in summarizing "
        "repositories into comprehensive documentation. You excel at analyzing codebases "
        "and creating clear, structured documentation."
    ),
    "generate_readme_file": (
        "Generate a comprehensive README file for the following repository:\n"
        "Repository: {repo_url}\n\n"
        "Please include:\n"
        "1. Project Overview\n"
        "   - Purpose and main functionality\n"
        "   - Key features\n"
        "2. Repository Structure\n"
        "   - Detailed breakdown of directories and their purposes\n"
        "   - Key files and their roles\n"
        "3. Technical Details\n"
        "   - Technologies used\n"
        "   - Architecture overview\n"
        "4. File Contents\n"
        "   - Specific description of each significant file\n\n"
        "Format the output in markdown, ensuring clear section headers and proper formatting.\n"
        "Please commit and push the changes to the repository after generating the README file."
    ),
}
151
worker/orca-agent/src/workflows/starRepoAudit/workflow.py
Normal file
@ -0,0 +1,151 @@
"""Star repo audit workflow implementation."""

import os
from github import Github
from prometheus_swarm.workflows.base import Workflow
from prometheus_swarm.tools.github_operations.implementations import (
    get_user_starred_repos,
)
from prometheus_swarm.utils.logging import log_section, log_key_value, log_error
from src.workflows.repoSummarizer import phases
from prometheus_swarm.workflows.utils import (
    check_required_env_vars,
    validate_github_auth,
)


class Task:
    def __init__(self, title: str, description: str, acceptance_criteria: list[str]):
        self.title = title
        self.description = description
        self.acceptance_criteria = acceptance_criteria

    def to_dict(self) -> dict:
        """Convert task to dictionary format."""
        return {
            "title": self.title,
            "description": self.description,
            "acceptance_criteria": self.acceptance_criteria,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "Task":
        """Create task from dictionary."""
        return cls(
            title=data["title"],
            description=data["description"],
            acceptance_criteria=data["acceptance_criteria"],
        )


class StarRepoAuditWorkflow(Workflow):
    def __init__(
        self,
        client,
        prompts,
        repo_url,
        github_username,
    ):
        # Extract owner and repo name from URL
        # URL format: https://github.com/owner/repo
        parts = repo_url.strip("/").split("/")
        repo_owner = parts[-2]
        repo_name = parts[-1]

        super().__init__(
            client=client,
            prompts=prompts,
            repo_url=repo_url,
            repo_owner=repo_owner,
            repo_name=repo_name,
            github_username=github_username,
        )
        self.context["repo_owner"] = repo_owner
        self.context["repo_name"] = repo_name
        self.context["github_username"] = github_username

    def setup(self):
        """Set up repository and workspace."""
        check_required_env_vars(["GITHUB_TOKEN", "GITHUB_USERNAME"])
        validate_github_auth(os.getenv("GITHUB_TOKEN"), os.getenv("GITHUB_USERNAME"))

        # # Get the default branch from GitHub
        # try:
        #     gh = Github(os.getenv("GITHUB_TOKEN"))
        #     repo = gh.get_repo(
        #         f"{self.context['repo_owner']}/{self.context['repo_name']}"
        #     )
        #     self.context["base_branch"] = repo.default_branch
        #     log_key_value("Default branch", self.context["base_branch"])
        # except Exception as e:
        #     log_error(e, "Failed to get default branch, using 'main'")
        #     self.context["base_branch"] = "main"

        # # Set up repository directory
        # repo_path, original_dir = setup_repo_directory()
        # self.context["repo_path"] = repo_path
        # self.original_dir = original_dir

        # # Fork and clone repository
        # log_section("FORKING AND CLONING REPOSITORY")
        # fork_result = fork_repository(
        #     f"{self.context['repo_owner']}/{self.context['repo_name']}",
        #     self.context["repo_path"],
        # )
        # if not fork_result["success"]:
        #     error = fork_result.get("error", "Unknown error")
        #     log_error(Exception(error), "Fork failed")
        #     raise Exception(error)

        # # Enter repo directory
        # os.chdir(self.context["repo_path"])

        # # Configure Git user info
        # setup_git_user_config(self.context["repo_path"])

        # # Get current files for context

    def cleanup(self):
        """Cleanup workspace."""
        # # Make sure we're not in the repo directory before cleaning up
        # if os.getcwd() == self.context.get("repo_path", ""):
        #     os.chdir(self.original_dir)

        # # Clean up the repository directory
        # cleanup_repo_directory(self.original_dir, self.context.get("repo_path", ""))
        # Clean up the MongoDB

    def run(self):
        star_repo_result = self.check_star_repo()
        if not star_repo_result:
            log_error(
                Exception("Repository is not starred"), "Repository is not starred"
            )
            return False
        return star_repo_result

    def check_star_repo(self):
        """Check if the repository is starred."""
        try:
            print(self.context["github_username"])

            starred_repos = get_user_starred_repos(self.context["github_username"])
            print(starred_repos)
            if not starred_repos or not starred_repos.get("success"):
                log_error(
                    Exception(starred_repos.get("error", "No result")),
                    "Failed to get starred repositories",
                )
                return False
            # Check if the repository is in the starred repos
            if f"{self.context['repo_owner']}/{self.context['repo_name']}" in [
                repo["full_name"] for repo in starred_repos["data"]["starred_repos"]
            ]:
                print("Repository is starred")
                return {"success": True, "result": "Repository is starred"}
            else:
                print("Repository is not starred")
                return {"success": False, "result": "Repository is not starred"}
        except Exception as e:
            log_error(e, "Failed to check if repository is starred")
            return False
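# For reference, check_star_repo assumes get_user_starred_repos returns a
# payload shaped roughly like the following (inferred from the access pattern
# above, not a documented contract):
#
#     {"success": True, "data": {"starred_repos": [{"full_name": "owner/repo"}]}}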
76
worker/package.json
Normal file
@ -0,0 +1,76 @@
{
  "name": "orca-task",
  "version": "2.2.0",
  "description": "",
  "main": "src/index.js",
  "type": "module",
  "scripts": {
    "build": "tsc",
    "test": "npm run build && node build/tests/simulateTask.js",
    "jest-test": "jest --detectOpenHandles",
    "start": "node index.js",
    "prod-debug": "nodemon --ignore 'dist/*' tests/prod-debug.js",
    "webpack": "rm -rf dist && rm -rf node_modules && yarn && webpack",
    "webpack:test": "webpack --config tests/test.webpack.config.js",
    "webpack:prod": "webpack --mode production",
    "webpack:dev": "webpack",
    "rename-dist": "mv dist/main.js dist/bafybeianaylvcqh42l7pitsboznlpni3b2gqh2n6jbldmm6oty36ldisra.js"
  },
  "author": "",
  "license": "ISC",
  "dependencies": {
    "@_koii/orca-node": "^0.1.18",
    "@_koii/storage-task-sdk": "^1.2.7",
    "@_koii/task-manager": "^1.0.13",
    "@_koii/web3.js": "^0.1.11",
    "@octokit/rest": "^21.1.1",
    "axios": "^1.7.2",
    "cross-spawn": "^7.0.3",
    "dotenv": "^16.5.0",
    "dotenv-webpack": "^8.1.0",
    "js-yaml": "^4.1.0",
    "lodash": "^4.17.21",
    "nodemon": "^3.1.0",
    "seedrandom": "^3.0.5",
    "tail": "^2.2.6"
  },
  "peerDependencies": {
    "@_koii/namespace-wrapper": "^1.0.23"
  },
  "devDependencies": {
    "@_koii/namespace-wrapper": "^1.0.23",
    "@babel/preset-env": "^7.26.0",
    "@babel/preset-typescript": "^7.26.0",
    "@types/cross-spawn": "^6.0.6",
    "@types/eslint": "^9.6.1",
    "@types/express": "^5.0.0",
    "@types/jest": "^29.5.14",
    "@types/js-yaml": "^4.0.9",
    "@types/lodash": "^4.17.9",
    "@types/node": "^22.14.1",
    "@types/seedrandom": "^3.0.8",
    "@types/tail": "^2.2.3",
    "@typescript-eslint/eslint-plugin": "^8.7.0",
    "@typescript-eslint/parser": "^8.7.0",
    "axios": "^1.7.2",
    "babel-jest": "^29.7.0",
    "chalk": "^5.3.0",
    "cross-spawn": "^7.0.3",
    "dotenv-webpack": "^8.1.0",
    "eslint": "^8.57.0",
    "globals": "^15.9.0",
    "jest": "^29.7.0",
    "joi": "^17.9.2",
    "prettier": "^3.3.3",
    "tail": "^2.2.6",
    "ts-loader": "^9.5.1",
    "ts-node": "^10.9.2",
    "tsx": "^4.19.3",
    "typescript": "^5.6.2",
    "webpack": "^5.28.0",
    "webpack-cli": "^4.5.0"
  },
  "engines": {
    "node": ">=18.17.0"
  }
}
20
worker/src/index.ts
Normal file
@ -0,0 +1,20 @@
import { initializeTaskManager } from "@_koii/task-manager";
import { setup } from "./task/0-setup";
import { task } from "./task/1-task";
import { submission } from "./task/2-submission";
import { audit } from "./task/3-audit";
import { distribution } from "./task/4-distribution";
import { routes } from "./task/5-routes";

import { initializeOrcaClient } from "@_koii/task-manager/extensions";
import { getConfig } from "./orcaSettings";

initializeTaskManager({
  setup,
  task,
  submission,
  audit,
  distribution,
  routes,
});
initializeOrcaClient(getConfig);
46
worker/src/orcaSettings.ts
Normal file
@ -0,0 +1,46 @@
import { TASK_ID, namespaceWrapper } from "@_koii/namespace-wrapper";
import "dotenv/config";

const imageUrl = "docker.io/hermanyiqunliang/summarizer-agent:0.2";

async function createPodSpec(): Promise<string> {
  const basePath = await namespaceWrapper.getBasePath();

  const podSpec = `apiVersion: v1
kind: Pod
metadata:
  name: 247-builder-test
spec:
  containers:
    - name: user-${TASK_ID}
      image: ${imageUrl}
      env:
        - name: GITHUB_TOKEN
          value: "${process.env.GITHUB_TOKEN}"
        - name: GITHUB_USERNAME
          value: "${process.env.GITHUB_USERNAME}"
        - name: ANTHROPIC_API_KEY
          value: "${process.env.ANTHROPIC_API_KEY}"
      volumeMounts:
        - name: builder-data
          mountPath: /data
  volumes:
    - name: builder-data
      hostPath:
        path: ${basePath}/orca/data
        type: DirectoryOrCreate
`;
  return podSpec;
}
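// Note (illustrative): the template literal above is evaluated when the pod
// spec is built, so TASK_ID, the image URL, and the values from process.env
// are baked into the YAML handed to the Orca pod. For example, with
// TASK_ID = "abc123" (hypothetical) the container would be named "user-abc123".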

export async function getConfig(): Promise<{
  imageURL: string;
  customPodSpec: string;
  rootCA: string | null;
}> {
  return {
    imageURL: imageUrl,
    customPodSpec: await createPodSpec(),
    rootCA: null,
  };
}
4
worker/src/task/0-setup.ts
Normal file
@ -0,0 +1,4 @@
export async function setup(): Promise<void> {
  // Define any steps that must be executed before the task starts
  console.log("CUSTOM SETUP");
}
215
worker/src/task/1-task.ts
Normal file
@ -0,0 +1,215 @@
import { getOrcaClient } from "@_koii/task-manager/extensions";
import { namespaceWrapper, TASK_ID } from "@_koii/namespace-wrapper";
import "dotenv/config";
import { getRandomNodes } from "../utils/leader";
import { getExistingIssues } from "../utils/existingIssues";
import { status, middleServerUrl } from "../utils/constant";
import dotenv from "dotenv";
import { checkAnthropicAPIKey, isValidAnthropicApiKey } from "../utils/anthropicCheck";
import { checkGitHub } from "../utils/githubCheck";
import { LogLevel } from "@_koii/namespace-wrapper/dist/types";
import { actionMessage } from "../utils/constant";
import { errorMessage } from "../utils/constant";
dotenv.config();


export async function task(roundNumber: number): Promise<void> {
  /**
   * Run your task and store the proofs to be submitted for auditing
   * It is expected you will store the proofs in your container
   * The submission of the proofs is done in the submission function
   */
  // There is no submission on round 0, so there is no need to trigger fetching
  // audit results before round 4 (the offset was changed from 3 to 4 to allow more time)
  if (roundNumber >= 4) {
    const triggerFetchAuditResult = await fetch(`${middleServerUrl}/api/builder/summarizer/trigger-fetch-audit-result`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify({ taskId: TASK_ID, round: roundNumber - 4 })
    });
    console.log(`[TASK] Trigger fetch audit result for round ${roundNumber - 4}. Result is ${triggerFetchAuditResult.status}.`);
  }
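  // Worked example of the offset above (for illustration): in round 5 the node
  // asks the middle server to fetch audit results for round 5 - 4 = 1, so
  // audit-result collection always lags submissions by four rounds.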
  console.log(`[TASK] EXECUTE TASK FOR ROUND ${roundNumber}`);
  try {
    const orcaClient = await getOrcaClient();
    // Check that the required env variables are valid
    if (!process.env.ANTHROPIC_API_KEY) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.ANTHROPIC_API_KEY_INVALID, actionMessage.ANTHROPIC_API_KEY_INVALID);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ANTHROPIC_API_KEY_INVALID);
      return;
    }
    if (!isValidAnthropicApiKey(process.env.ANTHROPIC_API_KEY!)) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.ANTHROPIC_API_KEY_INVALID, actionMessage.ANTHROPIC_API_KEY_INVALID);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ANTHROPIC_API_KEY_INVALID);
      return;
    }
    const isAnthropicAPIKeyValid = await checkAnthropicAPIKey(process.env.ANTHROPIC_API_KEY!);
    if (!isAnthropicAPIKeyValid) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.ANTHROPIC_API_KEY_NO_CREDIT, actionMessage.ANTHROPIC_API_KEY_NO_CREDIT);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ANTHROPIC_API_KEY_NO_CREDIT);
      return;
    }
    if (!process.env.GITHUB_USERNAME || !process.env.GITHUB_TOKEN) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.GITHUB_CHECK_FAILED, actionMessage.GITHUB_CHECK_FAILED);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.GITHUB_CHECK_FAILED);
      return;
    }
    const isGitHubValid = await checkGitHub(process.env.GITHUB_USERNAME!, process.env.GITHUB_TOKEN!);
    if (!isGitHubValid) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.GITHUB_CHECK_FAILED, actionMessage.GITHUB_CHECK_FAILED);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.GITHUB_CHECK_FAILED);
      return;
    }
    if (!orcaClient) {
      await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.NO_ORCA_CLIENT, actionMessage.NO_ORCA_CLIENT);
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.NO_ORCA_CLIENT);
      return;
    }

    const stakingKeypair = await namespaceWrapper.getSubmitterAccount();
    if (!stakingKeypair) {
      throw new Error("No staking keypair found");
    }
    const stakingKey = stakingKeypair.publicKey.toBase58();
    const pubKey = await namespaceWrapper.getMainAccountPubkey();
    if (!pubKey) {
      throw new Error("No public key found");
    }
    /****************** All issues need to be starred ******************/

    const existingIssues = await getExistingIssues();
    const githubUrls = existingIssues.map((issue) => issue.githubUrl);
    try {
      await orcaClient.podCall(`star/${roundNumber}`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify({ taskId: TASK_ID, round_number: String(roundNumber), github_urls: githubUrls }),
      });
    } catch (error) {
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.STAR_ISSUE_FAILED);
      console.error("Error starring issues:", error);
    }
    /****************** Each of these issues needs a markdown summary file ******************/

    const signature = await namespaceWrapper.payloadSigning(
      {
        taskId: TASK_ID,
        roundNumber: roundNumber,
        action: "fetch",
        githubUsername: stakingKey,
        stakingKey: stakingKey
      },
      stakingKeypair.secretKey,
    );

    // const initializedDocumentSummarizeIssues = await getInitializedDocumentSummarizeIssues(existingIssues);

    console.log(`[TASK] Making request to middle server with taskId: ${TASK_ID} and round: ${roundNumber}`);
    const requiredWorkResponse = await fetch(`${middleServerUrl}/api/builder/summarizer/fetch-summarizer-todo`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify({ signature: signature, stakingKey: stakingKey }),
    });
    // Check that the response status is 200
    if (requiredWorkResponse.status !== 200) {
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.NO_ISSUES_PENDING_TO_BE_SUMMARIZED);
      return;
    }
    const requiredWorkResponseData = await requiredWorkResponse.json();
    console.log("[TASK] requiredWorkResponseData: ", requiredWorkResponseData);

    const jsonBody = {
      taskId: TASK_ID,
      round_number: String(roundNumber),
      repo_url: `https://github.com/${requiredWorkResponseData.data.repo_owner}/${requiredWorkResponseData.data.repo_name}`,
    };
    console.log("[TASK] jsonBody: ", jsonBody);
    try {
      const repoSummaryResponse = await orcaClient.podCall(`repo_summary/${roundNumber}`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify(jsonBody),
      });
      console.log("[TASK] repoSummaryResponse: ", repoSummaryResponse);
      console.log("[TASK] repoSummaryResponse.data.result.data ", repoSummaryResponse.data.result.data);
      const payload = {
        taskId: TASK_ID,
        action: "add",
        roundNumber: roundNumber,
        prUrl: repoSummaryResponse.data.result.data.pr_url,
        stakingKey: stakingKey
      };
      console.log("[TASK] Signing payload: ", payload);
      if (repoSummaryResponse.status === 200) {
        try {
          const signature = await namespaceWrapper.payloadSigning(
            payload,
            stakingKeypair.secretKey,
          );
          console.log("[TASK] signature: ", signature);
          const addPrToSummarizerTodoResponse = await fetch(`${middleServerUrl}/api/builder/summarizer/add-pr-to-summarizer-todo`, {
            method: "POST",
            headers: {
              "Content-Type": "application/json",
            },
            body: JSON.stringify({ signature: signature, stakingKey: stakingKey }),
          });
          console.log("[TASK] addPrToSummarizerTodoResponse: ", addPrToSummarizerTodoResponse);
        } catch (error) {
          await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_FAILED_TO_ADD_PR_TO_SUMMARIZER_TODO);
          console.error("[TASK] Error adding PR to summarizer todo:", error);
        }
        await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_SUCCESSFULLY_SUMMARIZED);
      } else {
        // Post this summary response to Slack to notify the team
        // THE HOOK IS ALREADY DISABLED
        // try {
        //   const slackResponse = await fetch('https://hooks.slack.com/services/', {
        //     method: "POST",
        //     headers: {
        //       "Content-Type": "application/json",
        //     },
        //     body: JSON.stringify({
        //       text: `[TASK] Error summarizing issue:\nStatus: ${repoSummaryResponse.status}\nData: ${JSON.stringify(repoSummaryResponse.data, null, 2)}`
        //     }),
        //   });
        //   console.log("[TASK] slackResponse: ", slackResponse);
        // } catch (error) {
        //   console.error("[TASK] Error posting to slack:", error);
        // }

        await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_FAILED_TO_BE_SUMMARIZED);
      }
    } catch (error) {
      await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_FAILED_TO_BE_SUMMARIZED);

      // try {
      //   const slackResponse = await fetch('https://hooks.slack.com/services', {
      //     method: "POST",
      //     headers: {
      //       "Content-Type": "application/json",
      //     },
      //     body: JSON.stringify({
      //       text: `[TASK] Error summarizing issue:\n ${JSON.stringify(error)}`
      //     }),
      //   });
      //   console.log("[TASK] slackResponse: ", slackResponse);
      // } catch (error) {
      //   console.error("[TASK] Error posting to slack:", error);
      // }
      console.error("[TASK] EXECUTE TASK ERROR:", error);
    }
  } catch (error) {
    await namespaceWrapper.storeSet(`result-${roundNumber}`, status.UNKNOWN_ERROR);
    console.error("[TASK] EXECUTE TASK ERROR:", error);
  }
}
102
worker/src/task/2-submission.ts
Normal file
@ -0,0 +1,102 @@
import { storeFile } from "../utils/ipfs";
import { getOrcaClient } from "@_koii/task-manager/extensions";
import { namespaceWrapper, TASK_ID } from "@_koii/namespace-wrapper";
import { status } from "../utils/constant";
export async function submission(roundNumber: number): Promise<string | void> {
  /**
   * Retrieve the task proofs from your container and submit for auditing
   * Must return a string of max 512 bytes to be submitted on chain
   * The default implementation handles uploading the proofs to IPFS
   * and returning the CID
   */
  console.log(`[SUBMISSION] Starting submission process for round ${roundNumber}`);

  try {
    console.log("[SUBMISSION] Initializing Orca client...");
    const orcaClient = await getOrcaClient();
    if (!orcaClient) {
      console.error("[SUBMISSION] Failed to initialize Orca client");
      return;
    }
    console.log("[SUBMISSION] Orca client initialized successfully");

    console.log(`[SUBMISSION] Fetching task result for round ${roundNumber}...`);
    const taskResult = await namespaceWrapper.storeGet(`result-${roundNumber}`);
    if (!taskResult) {
      console.log("[SUBMISSION] No task result found for this round");
      return status.NO_DATA_FOR_THIS_ROUND;
    }
    console.log(`[SUBMISSION] Task result status: ${taskResult}`);

    if (taskResult !== status.ISSUE_SUCCESSFULLY_SUMMARIZED) {
      console.log(`[SUBMISSION] Task not successfully summarized. Status: ${taskResult}`);
      return taskResult;
    }

    console.log(`[SUBMISSION] Fetching submission data for round ${roundNumber}...`);
    const result = await orcaClient.podCall(`submission/${roundNumber}`);
    let submission;

    console.log("[SUBMISSION] Submission result:", result.data);

    if (result.data === "No submission") {
      console.log("[SUBMISSION] No existing submission found, creating new submission object");
      submission = {
        githubUsername: process.env.GITHUB_USERNAME,
        prUrl: "none",
        roundNumber,
      };
    } else {
      submission = result.data;
    }

    console.log("[SUBMISSION] Validating submission data...");
    if (submission.roundNumber !== roundNumber) {
      console.error(`[SUBMISSION] Round number mismatch. Expected: ${roundNumber}, Got: ${submission.roundNumber}`);
      throw new Error("Submission is not for the current round");
    }

    if (!submission.prUrl) {
      console.error("[SUBMISSION] Missing PR URL in submission");
      throw new Error("Submission is missing PR URL");
    }

    console.log("[SUBMISSION] Submission data validated successfully:", submission);

    console.log("[SUBMISSION] Getting submitter account...");
    const stakingKeypair = await namespaceWrapper.getSubmitterAccount();

    if (!stakingKeypair) {
      console.error("[SUBMISSION] No staking keypair found");
      throw new Error("No staking keypair found");
    }
    console.log("[SUBMISSION] Submitter account retrieved successfully");

    const stakingKey = stakingKeypair.publicKey.toBase58();
    const pubKey = await namespaceWrapper.getMainAccountPubkey();
    console.log("[SUBMISSION] Staking key:", stakingKey);
    console.log("[SUBMISSION] Public key:", pubKey);

    console.log("[SUBMISSION] Signing submission payload...");
    const signature = await namespaceWrapper.payloadSigning(
      {
        taskId: TASK_ID,
        roundNumber,
        stakingKey,
        pubKey,
        action: "audit",
        ...submission,
      },
      stakingKeypair.secretKey,
    );
    console.log("[SUBMISSION] Payload signed successfully");

    console.log("[SUBMISSION] Storing submission on IPFS...");
    const cid = await storeFile({ signature }, "submission.json");
    console.log("[SUBMISSION] Submission stored successfully. CID:", cid);
    return cid || void 0;
  } catch (error) {
    console.error("[SUBMISSION] Error during submission process:", error);
    throw error;
  }
}
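// For reference (derived from the checks above, not a documented schema): a
// non-dummy submission object is expected to look like
//   { githubUsername: "<user>", prUrl: "https://github.com/owner/repo/pull/1", roundNumber: 3 }
// (values hypothetical), and the CID returned on chain points at a JSON file
// of the form { signature: "<signed payload>" }.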
84
worker/src/task/3-audit.ts
Normal file
@ -0,0 +1,84 @@
import { getOrcaClient } from "@_koii/task-manager/extensions";
import { middleServerUrl, status } from "../utils/constant";
import { submissionJSONSignatureDecode } from "../utils/submissionJSONSignatureDecode";
// import { status } from '../utils/constant'
export async function audit(cid: string, roundNumber: number, submitterKey: string): Promise<boolean | void> {
  /**
   * Audit a submission
   * This function should return true if the submission is correct, false otherwise
   * The default implementation retrieves the proofs from IPFS
   * and sends them to your container for auditing
   */

  try {
    const orcaClient = await getOrcaClient();
    if (!orcaClient) {
      // await namespaceWrapper.storeSet(`result-${roundNumber}`, status.NO_ORCA_CLIENT);
      return;
    }
    // Check if the cid is one of the status strings
    if (Object.values(status).includes(cid)) {
      // This returns a dummy true
      return true;
    }
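    // Example (illustrative): a node that could not produce work stores a
    // status string such as "No issues pending to be summarized" in place of
    // an IPFS CID, so the audit recognizes that marker here and passes it
    // through rather than trying to decode it as a real submission.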
    const decodeResult = await submissionJSONSignatureDecode({ submission_value: cid, submitterPublicKey: submitterKey, roundNumber: roundNumber });
    if (!decodeResult) {
      console.log("[AUDIT] DECODE RESULT FAILED.");
      return false;
    }
    console.log(`[AUDIT] ✅ Signature decoded successfully`);

    console.log(`[AUDIT] Checking summarizer status for submitter ${submitterKey}`);
    const checkSummarizerResponse = await fetch(`${middleServerUrl}/api/builder/summarizer/check-summarizer`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json"
      },
      body: JSON.stringify({
        stakingKey: submitterKey,
        roundNumber,
        githubUsername: decodeResult.githubUsername,
        prUrl: decodeResult.prUrl
      }),
    });
    const checkSummarizerJSON = await checkSummarizerResponse.json();
    console.log(`[AUDIT] Summarizer check response:`, checkSummarizerJSON);

    if (!checkSummarizerJSON.success) {
      console.log(`[AUDIT] ❌ Audit failed for ${submitterKey}`);
      return false;
    }
    console.log(`[AUDIT] ✅ Summarizer check passed`);

    console.log(`[AUDIT] Sending audit request for submitter: ${submitterKey}`);
    console.log(`[AUDIT] Submission data being sent to audit:`, decodeResult);

    const result = await orcaClient.podCall(`audit/${roundNumber}`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify({
        submission: decodeResult,
      }),
    });

    console.log(`[AUDIT] Raw audit result:`, result);
    console.log(`[AUDIT] Audit result data type:`, typeof result.data);
    console.log(`[AUDIT] Audit result data value:`, result.data);

    if (result.data === true) {
      console.log(`[AUDIT] ✅ Audit passed for ${submitterKey}`);
      return true;
    } else {
      console.log(`[AUDIT] ❌ Audit failed for ${submitterKey}`);
      console.log(`[AUDIT] Failed audit result data:`, result.data);
      return false;
    }
  } catch (error) {
    console.error("[AUDIT] Error auditing submission:", error);

    // On error, intentionally return nothing (neither pass nor fail)
    // return true;
  }
}
64
worker/src/task/4-distribution.ts
Normal file
@ -0,0 +1,64 @@
import { Submitter, DistributionList } from "@_koii/task-manager";
import { namespaceWrapper, TASK_ID } from "@_koii/namespace-wrapper";
import { customReward, status } from "../utils/constant";
import { Submission } from "@_koii/namespace-wrapper/dist/types";
import { middleServerUrl } from "../utils/constant";
import { getOrcaClient } from "@_koii/task-manager/extensions";
import { submissionJSONSignatureDecode } from "../utils/submissionJSONSignatureDecode";
import { getRandomNodes } from "../utils/leader";
const getSubmissionList = async (roundNumber: number): Promise<Record<string, Submission>> => {
  const submissionInfo = await namespaceWrapper.getTaskSubmissionInfo(roundNumber);
  return submissionInfo?.submissions[roundNumber] || {};
};
export const getEmptyDistributionList = async (
  submitters: Submitter[],
): Promise<DistributionList> => {
  const distributionList: DistributionList = {};
  for (const submitter of submitters) {
    distributionList[submitter.publicKey] = 0;
  }
  return distributionList;
};
export const distribution = async (
  submitters: Submitter[],
  bounty: number,
  roundNumber: number,
): Promise<DistributionList> => {
  try {
    const distributionList: DistributionList = {};

    for (const submitter of submitters) {
      console.log(`\n[DISTRIBUTION] Processing submitter: ${submitter.publicKey}`);

      console.log(`[DISTRIBUTION] Getting submission list for round ${roundNumber}`);
      const submitterSubmissions = await getSubmissionList(roundNumber);
      console.log(`[DISTRIBUTION] Total submissions found: ${Object.keys(submitterSubmissions).length}`);

      const submitterSubmission = submitterSubmissions[submitter.publicKey];
      if (!submitterSubmission || submitterSubmission.submission_value === "") {
        console.log(`[DISTRIBUTION] ❌ No valid submission found for submitter ${submitter.publicKey}`);
        distributionList[submitter.publicKey] = 0;
        continue;
      }
      if (Object.values(status).includes(submitterSubmission.submission_value)) {
        distributionList[submitter.publicKey] = 0;
        continue;
      } else {
        // TODO: Check if I should include = 0 here
        if (submitter.votes >= 0) {
          distributionList[submitter.publicKey] = customReward;
        } else {
          distributionList[submitter.publicKey] = 0;
        }
      }
    }

    console.log(`[DISTRIBUTION] ✅ Distribution completed successfully`);
    console.log(`[DISTRIBUTION] Final distribution list:`, distributionList);
    return distributionList;
  } catch (error: any) {
    console.error(`[DISTRIBUTION] ❌ ERROR IN DISTRIBUTION:`, error);
    console.error(`[DISTRIBUTION] Error stack:`, error.stack);
    return {};
  }
};
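// Worked example (illustrative): with customReward = 400 * 10 ** 9 ROE (see
// utils/constant.ts), a submitter whose submission_value is a real CID and
// whose vote total is non-negative is credited 400 * 10 ** 9 ROE for the
// round; dummy status submissions and negative-vote submitters receive 0.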
57
worker/src/task/5-routes.ts
Normal file
@ -0,0 +1,57 @@
import { namespaceWrapper, app } from "@_koii/task-manager/namespace-wrapper";
import { getLeaderNode, getRandomNodes } from "../utils/leader";
import { task } from "./1-task";
import { submission } from "./2-submission";
import { audit } from "./3-audit";
import { distribution } from "./4-distribution";
import { submissionJSONSignatureDecode } from "../utils/submissionJSONSignatureDecode";
import { Submission } from "@_koii/namespace-wrapper/dist/types";
import { taskRunner } from "@_koii/task-manager";

/**
 *
 * Define all your custom routes here
 *
 */

// Example route
export async function routes() {
  app.get("/value", async (_req, res) => {
    const value = await namespaceWrapper.storeGet("value");
    console.log("value", value);
    res.status(200).json({ value: value });
  });

  app.get("/leader/:roundNumber/:submitterPublicKey", async (req, res) => {
    const roundNumber = req.params.roundNumber;
    const submitterPublicKey = req.params.submitterPublicKey;
    const { isLeader, leaderNode } = await getLeaderNode({ roundNumber: Number(roundNumber), submitterPublicKey: submitterPublicKey });
    res.status(200).json({ isLeader: isLeader, leaderNode: leaderNode });
  });

  app.get("/task/:roundNumber", async (req, res) => {
    console.log("task endpoint called with round number: ", req.params.roundNumber);
    const roundNumber = req.params.roundNumber;
    const taskResult = await task(Number(roundNumber));
    res.status(200).json({ result: taskResult });
  });
  app.get("/audit/:roundNumber/:cid/:submitterPublicKey", async (req, res) => {
    const cid = req.params.cid;
    const roundNumber = req.params.roundNumber;
    const submitterPublicKey = req.params.submitterPublicKey;
    const auditResult = await audit(cid, Number(roundNumber), submitterPublicKey);
    res.status(200).json({ result: auditResult });
  });
  app.get("/submission/:roundNumber", async (req, res) => {
    const roundNumber = req.params.roundNumber;
    const submissionResult = await submission(Number(roundNumber));
    res.status(200).json({ result: submissionResult });
  });

  app.get("/submitDistribution/:roundNumber", async (req, res) => {
    const roundNumber = req.params.roundNumber;
    const submitDistributionResult = await taskRunner.submitDistributionList(Number(roundNumber));
    res.status(200).json({ result: submitDistributionResult });
  });
}
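// Example usage (assumed local port, for illustration): once the routes are
// registered, a round can be exercised by hand with e.g.
//   curl http://localhost:<port>/task/3
//   curl http://localhost:<port>/submission/3
// where <port> is whatever the task-manager's express app is listening on.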
36
worker/src/utils/anthropicCheck.ts
Normal file
@ -0,0 +1,36 @@
export function isValidAnthropicApiKey(key: string) {
  const regex = /^sk-ant-[a-zA-Z0-9_-]{32,}$/;
  return regex.test(key);
}
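// Example (illustrative): the regex accepts "sk-ant-" followed by at least 32
// characters from [a-zA-Z0-9_-], so
//   isValidAnthropicApiKey("sk-ant-" + "a".repeat(32))  // true
//   isValidAnthropicApiKey("sk-ant-short")              // false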

export async function checkAnthropicAPIKey(apiKey: string) {
  const response = await fetch('https://api.anthropic.com/v1/messages', {
    method: 'POST',
    headers: {
      'x-api-key': apiKey,
      'anthropic-version': '2023-06-01',
      'content-type': 'application/json',
    },
    body: JSON.stringify({
      model: 'claude-3-opus-20240229', // or a cheaper model
      max_tokens: 1, // minimal usage
      messages: [{ role: 'user', content: 'Hi' }],
    }),
  });

  if (response.status === 200) {
    console.log('✅ API key is valid and has credit.');
    return true;
  } else {
    const data = await response.json().catch(() => ({}));
    if (response.status === 401) {
      console.log('❌ Invalid API key.');
    } else if (response.status === 403 && data.error?.message?.includes('billing')) {
      console.log('❌ API key has no credit or is not authorized.');
    } else {
      console.log('⚠️ Unexpected error:', data);
    }
    return false;
  }
}
57
worker/src/utils/constant.ts
Normal file
@ -0,0 +1,57 @@
import dotenv from "dotenv";
dotenv.config();


export const status = {
  ISSUE_FAILED_TO_BE_SUMMARIZED: "Issue failed to be summarized",
  ISSUE_SUCCESSFULLY_SUMMARIZED: "Issue successfully summarized",
  NO_ISSUES_PENDING_TO_BE_SUMMARIZED: "No issues pending to be summarized",
  ROUND_LESS_THAN_OR_EQUAL_TO_1: "Round <= 1",
  NO_ORCA_CLIENT: "No orca client",
  NO_CHOSEN_AS_ISSUE_SUMMARIZER: "No chosen as issue summarizer",
  UNKNOWN_ERROR: "Unknown error",
  STAR_ISSUE_FAILED: "Star issue failed",
  GITHUB_CHECK_FAILED: "GitHub check failed",
  ANTHROPIC_API_KEY_INVALID: "Anthropic API key invalid",
  ANTHROPIC_API_KEY_NO_CREDIT: "Anthropic API key has no credit",
  NO_DATA_FOR_THIS_ROUND: "No data for this round",
  ISSUE_FAILED_TO_ADD_PR_TO_SUMMARIZER_TODO: "Issue failed to add PR to summarizer todo",
};

export const errorMessage = {
  ISSUE_FAILED_TO_BE_SUMMARIZED: "We couldn't summarize this issue. Please try again later.",
  ISSUE_SUCCESSFULLY_SUMMARIZED: "The issue was successfully summarized.",
  NO_ISSUES_PENDING_TO_BE_SUMMARIZED: "There are no issues waiting to be summarized at this time.",
  ROUND_LESS_THAN_OR_EQUAL_TO_1: "This operation requires a round number greater than 1.",
  NO_ORCA_CLIENT: "The Orca client is not available.",
  NO_CHOSEN_AS_ISSUE_SUMMARIZER: "You haven't been selected as an issue summarizer.",
  UNKNOWN_ERROR: "An unexpected error occurred. Please try again later.",
  STAR_ISSUE_FAILED: "We couldn't star the issue. Please try again later.",
  GITHUB_CHECK_FAILED: "The GitHub check failed. Please verify your GitHub Key.",
  ANTHROPIC_API_KEY_INVALID: "The Anthropic API Key is not valid. Please check your API key.",
  ANTHROPIC_API_KEY_NO_CREDIT: "Your Anthropic API key has no remaining credits.",
  NO_DATA_FOR_THIS_ROUND: "There is no data available for this round.",
  ISSUE_FAILED_TO_ADD_PR_TO_SUMMARIZER_TODO: "We couldn't add the PR to the summarizer todo list.",
};

export const actionMessage = {
  ISSUE_FAILED_TO_BE_SUMMARIZED: "We couldn't summarize this issue. Please try again later.",
  ISSUE_SUCCESSFULLY_SUMMARIZED: "The issue was successfully summarized.",
  NO_ISSUES_PENDING_TO_BE_SUMMARIZED: "There are no issues waiting to be summarized at this time.",
  ROUND_LESS_THAN_OR_EQUAL_TO_1: "This operation requires a round number greater than 1.",
  NO_ORCA_CLIENT: "Please click Orca icon to connect your Orca Pod.",
  NO_CHOSEN_AS_ISSUE_SUMMARIZER: "You haven't been selected as an issue summarizer.",
  UNKNOWN_ERROR: "An unexpected error occurred. Please try again later.",
  STAR_ISSUE_FAILED: "We couldn't star the issue. Please try again later.",
  GITHUB_CHECK_FAILED: "Please go to the env variable page to update your GitHub Key.",
  ANTHROPIC_API_KEY_INVALID: "Please follow the guide under task description page to set up your Anthropic API key correctly.",
  ANTHROPIC_API_KEY_NO_CREDIT: "Please add credits to continue.",
  NO_DATA_FOR_THIS_ROUND: "There is no data available for this round.",
  ISSUE_FAILED_TO_ADD_PR_TO_SUMMARIZER_TODO: "We couldn't add the PR to the summarizer todo list. Please try again later.",
};
/********************* CONSTANTS THAT DIFFER BETWEEN PROD AND TEST *********************/
export const defaultBountyMarkdownFile = "https://raw.githubusercontent.com/koii-network/prometheus-swarm-bounties/master/README.md";

export const customReward = 400 * 10 ** 9; // This should be in ROE!

export const middleServerUrl = "https://ooww84kco0s0cs808w8cg804.dev.koii.network";
96
worker/src/utils/distributionList.ts
Normal file
@ -0,0 +1,96 @@
import { namespaceWrapper } from "@_koii/namespace-wrapper";
import { getFile } from "./ipfs";

/**
 * Filter out ineligible nodes from the distribution list
 * @param distributionList Raw distribution list from namespace
 * @param roundNumber Round number whose submissions are used for the check
 * @returns Filtered distribution list containing only eligible nodes
 */
async function filterIneligibleNodes(
  distributionList: Record<string, number>,
  roundNumber: number,
): Promise<Record<string, any>> {
  const filteredDistributionList: Record<string, any> = {};

  if (Object.keys(distributionList).length === 0) {
    console.log("Distribution list is empty, skipping filterIneligibleNodes");
    return filteredDistributionList;
  }

  const taskSubmissionInfo = await namespaceWrapper.getTaskSubmissionInfo(roundNumber);
  if (!taskSubmissionInfo) {
    console.log("Task submission info is null, skipping filterIneligibleNodes");
    return filteredDistributionList;
  }

  const submissions = taskSubmissionInfo.submissions;

  for (const [stakingKey, amount] of Object.entries(distributionList)) {
    const numericAmount = amount as number;

    // Skip if amount is zero or negative (failed audit)
    if (numericAmount <= 0) {
      console.log("Skipping staking key:", stakingKey, "Amount:", numericAmount);
      continue;
    }

    // Find corresponding submission
    const submissionCID = submissions[roundNumber][stakingKey]["submission_value"];

    const submission = await getFile(submissionCID);

    // Skip if no submission found
    if (!submission) {
      console.log("No submission found, skipping staking key:", stakingKey);
      continue;
    }

    const submissionData = JSON.parse(submission);

    console.log("Staking key:", stakingKey, "Submission data:", submissionData);

    const payload = await namespaceWrapper.verifySignature(submissionData.signature, stakingKey);
    console.log("Payload:", payload);

    const payloadData = JSON.parse(payload.data || "{}");

    // Skip if submission has no PR URL or is a dummy submission
    if (!payloadData.prUrl || payloadData.prUrl === "none") {
      continue;
    }

    // Node is eligible, include in filtered list
    filteredDistributionList[stakingKey] = payloadData;
  }

  console.log("Filtered distribution list:", filteredDistributionList);

  return filteredDistributionList;
}

export async function getDistributionList(roundNumber: number): Promise<Record<string, any> | null> {
  try {
    const taskDistributionInfo = await namespaceWrapper.getTaskDistributionInfo(roundNumber);
    if (!taskDistributionInfo) {
      console.log("Task distribution info is null, skipping task");
      return null;
    }
    const distribution = taskDistributionInfo.distribution_rewards_submission[roundNumber];
    const leaderStakingKey = Object.keys(distribution)[0];
    console.log("Fetching distribution list for round", roundNumber, "with leader staking key", leaderStakingKey);
    const distributionList = await namespaceWrapper.getDistributionList(leaderStakingKey, roundNumber);
    if (!distributionList) {
      console.log("Distribution list is null, skipping task");
      return null;
    }
    console.log("Raw distribution list:", distributionList);

    const parsedDistributionList: Record<string, number> = JSON.parse(distributionList);

    return await filterIneligibleNodes(parsedDistributionList, roundNumber);
  } catch (error) {
    console.error("Error fetching distribution list:", error);
    return null;
  }
}
161
worker/src/utils/existingIssues.ts
Normal file
@ -0,0 +1,161 @@
import { defaultBountyMarkdownFile } from "./constant";

interface BountyIssue {
  githubUrl: string;
  projectName: string;
  bountyTask: string;
  description: string;
  bountyAmount: string;
  bountyType: string;
  transactionHash: string;
  status: string;
}

export async function getExistingIssues(): Promise<BountyIssue[]> {
  try {
    // Read the bounty markdown file
    // console.log('Fetching markdown file from:', defaultBountyMarkdownFile);
    const bountyMarkdownFile = await fetch(defaultBountyMarkdownFile);
    const bountyMarkdownFileText = await bountyMarkdownFile.text();

    // console.log('Raw markdown content:', bountyMarkdownFileText);

    const bountyMarkdownFileLines = bountyMarkdownFileText.split("\n");
    // console.log('Number of lines:', bountyMarkdownFileLines.length);

    const issues: BountyIssue[] = [];
    let isTableStarted = false;

    for (const line of bountyMarkdownFileLines) {
      // Skip empty lines
      if (line.trim() === '') {
        // console.log('Skipping empty line');
        continue;
      }

      // console.log('Processing line:', line);

      // Skip the title line starting with #
      if (line.startsWith('#')) {
        // console.log('Found title line:', line);
        continue;
      }

      // Skip the header and separator lines
      if (line.startsWith('|') && line.includes('GitHub URL')) {
        // console.log('Found header line');
        continue;
      }
      if (line.startsWith('|') && line.includes('-----')) {
        // console.log('Found separator line');
        continue;
      }

      // Process table rows
      if (line.startsWith('|')) {
        isTableStarted = true;
        // Remove the first and last | and split by |
        const cells = line.slice(1, -1).split('|').map(cell => cell.trim());
        // console.log('Parsed cells:', cells);

        // Extract the GitHub URL and name from the markdown link format [name](url)
        const githubUrlMatch = cells[0].match(/\[(.*?)\]\((.*?)\)/);
        // console.log('GitHub URL match:', githubUrlMatch);

        const projectName = githubUrlMatch ? githubUrlMatch[1] : '';
        const githubUrl = githubUrlMatch ? githubUrlMatch[2] : '';

        const issue: BountyIssue = {
          githubUrl,
          projectName,
          bountyTask: cells[1],
          description: cells[3],
          bountyAmount: cells[4],
          bountyType: cells[5],
          transactionHash: cells[6],
          status: cells[7]
        };

        // console.log('Created issue object:', issue);
        issues.push(issue);
      }
    }
    // Filtering for status "Initialized" and bounty task "Document & Summarize"
    // is done separately in getInitializedDocumentSummarizeIssues below
    console.log('Final parsed issues number:', issues.length);
    return issues;
  } catch (error) {
    // console.error('Error processing markdown:', error);
    throw error;
  }
}
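// Illustrative input (hypothetical row, shaped for the parser above):
//   | [my-project](https://github.com/owner/my-project) | Document & Summarize | - | Short description | 100 KOII | Global | abc123def456 | Initialized |
// After the split, cells[0] carries the markdown link (yielding projectName and
// githubUrl), cells[1] the bounty task, cells[3] the description, cells[4] the
// amount, cells[5] the type, cells[6] the transaction hash and cells[7] the
// status; cells[2] is not used by this mapping.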

export async function getInitializedDocumentSummarizeIssues(issues: BountyIssue[]) {
  return issues.filter(issue => issue.status === "Initialized" && issue.bountyTask === "Document & Summarize");
}


// async function main(){
//   const existingIssues = await getExistingIssues();
//   const transactionHashs = [
//     "51680569890c40efa0f1f891044db219",
//     "21a7021da88a4092af014702da7638cb",
//     "befcf8d281074e3e934d8947c02ecb6f",
//     "a1db701bbda24a45b573e58840d9b31c",
//     "4ab503566a1142b1a3a9b406849839c9",
//     "7f6fb74e4b6a41b0af805ca3f6c9ea15",
//     "878af0d284c7460394b6d6e1090119be",
//     "64d90b6f891d4ea385c8f6ad81808103",
//     "6f7522b2e2374d4ca4f92bcf1f694bec",
//     "e85201ae9ed9417e8c56216bb44cd78b",
//     "d2ca259ef6ce4129a786677d919aad24",
//     "6ce684318aab4356b76ba64e87b31be7",
//     "d94d07647b1b42819d9bf629f5624ae1",
//     "60aa8f04dd314c14b30e5ac2957bd9f8",
//     "b7e21455e41b4626b5015b7bf39ff190",
//     "5e7109ed4dd94373958eda2416337ad3",
//     "2d647d3ab2c5465890939315ada47fd7",
//     "51ade1ba2f6341e99aa6ec56b1a00f27",
//     "a74f5e80238a4582aa444c18e9d5d66f",
//     "8390a3143a8445f196a124605e524f3d",
//     "26b712f341ca457d86db67ecd841c438",
//     "0ec98ba1e7174eef87772df8356bab0d",
//     "2737c33bff8c4490b7e5f53a5f5da580",
//     "e5b9b714d5694680a56cfa77361f3477",
//     "afb1bbbf1c074d28bef5fa216008cd6b",
//     "b40da8c53a644a6e898e3314e08c10ea",
//     "6a2f743c0497427ea4cd3cadb785b166",
//     "ce390111854b4a4b980b5e1e3f7c2f0e",
//     "c1b54e7a8dfd40be873051dd64bae5c4",
//     "7dcda8e5969c45e08f9a8887d8c39d10",
//     "fc11382529644d55b95fc2264e40436f",
//     "7c145db039b64edba719e81dd398b37e",
//     "c92b4920b25540a692c3b8e12215f0e0",
//     "cebbf4e2310d4a11ac44321823ddb373",
//     "5ae707005d0e413cb9feb9bdadc1e987",
//     "d28f92643c2548338d3e49144bc66afc",
//     "bd18484224c24fc786a5171e9d06cd50",
//     "f0605ea0f9524572bbe5bf4e72597476",
//     "62e6303c57334f72ada393bfa9e7aacc",
//     "f4ee9168804c4b01932ac76cc32d1f13",
//     "d4a95e2d35db47d28a208309019b1925",
//     "014425adc1b8447ab34d7d8104e91cf0"
//   ]
//   const initializedDocumentSummarizeIssues = existingIssues.filter((issue) => transactionHashs.includes(issue.transactionHash));
//   if (initializedDocumentSummarizeIssues.length == 0) {
//     console.log("No issues pending to be summarized");
//     return;
//   }
//   console.log("Initialized Document & Summarize issues number:", initializedDocumentSummarizeIssues.length);
// }
// async function main() {
//   try {
//     const existingIssues = await getInitializedDocumentSummarizeIssues();
//     console.log('Initialized Document & Summarize issues number:', existingIssues.length);
//   } catch (error) {
//     console.error('Error in main:', error);
//   }
// }

// main();
36
worker/src/utils/githubCheck.ts
Normal file
@ -0,0 +1,36 @@
export async function checkGitHub(username: string, token: string) {
  // 1. Check username
  const userRes = await fetch(`https://api.github.com/users/${username}`);
  const isUsernameValid = userRes.status === 200;

  // 2. Check token
  const tokenRes = await fetch('https://api.github.com/user', {
    headers: {
      Authorization: `token ${token}`,
    },
  });
  const isTokenValid = tokenRes.status === 200;
  const isIdentityValid = await checkGitHubIdentity(username, token);
  return isIdentityValid && isUsernameValid && isTokenValid;
}

async function checkGitHubIdentity(username: string, token: string) {
  const res = await fetch('https://api.github.com/user', {
    headers: {
      Authorization: `token ${token}`,
      Accept: 'application/vnd.github.v3+json',
    },
  });

  if (res.status !== 200) {
    return false;
  }

  const data = await res.json();

  if (data.login.toLowerCase() !== username.toLowerCase()) {
    return false;
  }

  return true;
}
34
worker/src/utils/ipfs.ts
Normal file
@ -0,0 +1,34 @@
import { namespaceWrapper } from "@_koii/namespace-wrapper";
import { KoiiStorageClient } from "@_koii/storage-task-sdk";
import fs from "fs";

export async function storeFile(data: any, filename: string = "submission.json"): Promise<string> {
  // Create a new instance of the Koii Storage Client
  const client = KoiiStorageClient.getInstance({});
  const basePath = await namespaceWrapper.getBasePath();
  try {
    // Write the data to a temp file
    fs.writeFileSync(`${basePath}/${filename}`, typeof data === "string" ? data : JSON.stringify(data));

    // Get the user staking account, to be used for signing the upload request
    const userStaking = await namespaceWrapper.getSubmitterAccount();
    if (!userStaking) {
      throw new Error("No staking keypair found");
    }

    // Upload the file to IPFS and get the CID
    const { cid } = await client.uploadFile(`${basePath}/${filename}`, userStaking);
    return cid;
  } catch (error) {
    throw error;
  } finally {
    // Delete the temp file
    fs.unlinkSync(`${basePath}/${filename}`);
  }
}
|
||||
export async function getFile(cid: string, filename: string = "submission.json"): Promise<string> {
|
||||
const storageClient = KoiiStorageClient.getInstance({});
|
||||
const fileBlob = await storageClient.getFile(cid, filename);
|
||||
return await fileBlob.text();
|
||||
}
|
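A minimal round-trip sketch of these helpers (illustrative only; it assumes the code runs inside a Koii node, so `namespaceWrapper` can resolve a base path and a staking keypair):

```ts
// Hypothetical: pin a submission object, then fetch it back by CID.
import { storeFile, getFile } from "./ipfs";

async function roundTrip(): Promise<void> {
  const cid = await storeFile({ summary: "example summary" });
  const raw = await getFile(cid);
  console.log("Recovered submission:", JSON.parse(raw));
}
```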
265
worker/src/utils/leader.ts
Normal file
@ -0,0 +1,265 @@
import { namespaceWrapper, TASK_ID } from "@_koii/namespace-wrapper";
import { getFile } from "./ipfs";
import seedrandom from "seedrandom";

export async function fetchRoundSubmissionGitHubRepoOwner(
  roundNumber: number,
  submitterPublicKey: string,
): Promise<string | null> {
  try {
    const taskSubmissionInfo = await namespaceWrapper.getTaskSubmissionInfo(roundNumber);
    if (!taskSubmissionInfo) {
      console.error("NO TASK SUBMISSION INFO");
      return null;
    }
    const submissions = taskSubmissionInfo.submissions;
    // This should only have one round
    const lastRound = Object.keys(submissions).pop();
    if (!lastRound) {
      return null;
    }
    const lastRoundSubmissions = submissions[lastRound];
    const lastRoundSubmitterSubmission = lastRoundSubmissions[submitterPublicKey];
    console.log("lastRoundSubmitterSubmission", { lastRoundSubmitterSubmission });
    if (!lastRoundSubmitterSubmission) {
      return null;
    }
    const cid = lastRoundSubmitterSubmission.submission_value;
    const submissionString = await getFile(cid);
    const submission = JSON.parse(submissionString);
    console.log({ submission });

    // verify the signature of the submission
    const signaturePayload = await namespaceWrapper.verifySignature(submission.signature, submitterPublicKey);

    console.log({ signaturePayload });

    // verify the signature payload
    if (signaturePayload.error || !signaturePayload.data) {
      console.error("INVALID SIGNATURE");
      return null;
    }
    const data = JSON.parse(signaturePayload.data);

    if (data.taskId !== TASK_ID || data.stakingKey !== submitterPublicKey) {
      console.error("INVALID SIGNATURE DATA");
      return null;
    }
    if (!data.githubUsername) {
      console.error("NO GITHUB USERNAME");
      console.log("data", { data });
      return null;
    }
    return data.githubUsername;
  } catch (error) {
    console.error("FETCH LAST ROUND SUBMISSION GITHUB REPO OWNER ERROR:", error);
    return null;
  }
}

export async function selectShortestDistance(keys: string[], submitterPublicKey: string): Promise<string> {
  let shortestDistance = Infinity;
  let closestKey = "";
  for (const key of keys) {
    const distance = knnDistance(submitterPublicKey, key);
    if (distance < shortestDistance) {
      shortestDistance = distance;
      closestKey = key;
    }
  }
  return closestKey;
}

async function getSubmissionInfo(roundNumber: number): Promise<any> {
  try {
    return await namespaceWrapper.getTaskSubmissionInfo(roundNumber);
  } catch (error) {
    console.error("GET SUBMISSION INFO ERROR:", error);
    return null;
  }
}

function calculatePublicKeyFrequency(submissions: any): Record<string, number> {
  const frequency: Record<string, number> = {};
  for (const round in submissions) {
    for (const publicKey in submissions[round]) {
      if (frequency[publicKey]) {
        frequency[publicKey]++;
      } else {
        frequency[publicKey] = 1;
      }
    }
  }
  return frequency;
}

function handleAuditTrigger(submissionAuditTrigger: any): Set<string> {
  const auditTriggerKeys = new Set<string>();
  for (const round in submissionAuditTrigger) {
    for (const publicKey in submissionAuditTrigger[round]) {
      auditTriggerKeys.add(publicKey);
    }
  }
  return auditTriggerKeys;
}

async function selectLeaderKey(
  sortedKeys: string[],
  leaderNumber: number,
  submitterPublicKey: string,
  submissionPublicKeysFrequency: Record<string, number>,
): Promise<string> {
  const topValue = sortedKeys[leaderNumber - 1];
  const count = sortedKeys.filter(
    (key) => submissionPublicKeysFrequency[key] >= submissionPublicKeysFrequency[topValue],
  ).length;

  if (count >= leaderNumber) {
    const rng = seedrandom(String(TASK_ID));
    const guaranteedKeys = sortedKeys.filter(
      (key) => submissionPublicKeysFrequency[key] > submissionPublicKeysFrequency[topValue],
    );
    const randomKeys = sortedKeys
      .filter((key) => submissionPublicKeysFrequency[key] === submissionPublicKeysFrequency[topValue])
      .sort(() => rng() - 0.5)
      .slice(0, leaderNumber - guaranteedKeys.length);
    const keys = [...guaranteedKeys, ...randomKeys];
    return await selectShortestDistance(keys, submitterPublicKey);
  } else {
    const keys = sortedKeys.slice(0, leaderNumber);
    return await selectShortestDistance(keys, submitterPublicKey);
  }
}
export async function getRandomNodes(roundNumber: number, numberOfNodes: number): Promise<string[]> {
  console.log("Getting random nodes for round:", roundNumber, "with number of nodes:", numberOfNodes);
  const lastRoundSubmission = await getSubmissionInfo(roundNumber - 1);
  console.log("Last round submission:", lastRoundSubmission);
  if (!lastRoundSubmission) {
    return [];
  }

  const lastRoundSubmissions = lastRoundSubmission.submissions;
  console.log("Last round submissions:", lastRoundSubmissions);

  // Get the last round number
  const lastRound = Object.keys(lastRoundSubmissions).pop();
  if (!lastRound) {
    return [];
  }

  // Get the submissions for that round
  const submissions = lastRoundSubmissions[lastRound];
  console.log("Submissions:", submissions);
  const availableKeys = Object.keys(submissions);
  console.log("Available keys:", availableKeys);
  // If we have fewer submissions than requested nodes, return all available submissions
  if (availableKeys.length <= numberOfNodes) {
    return availableKeys;
  }

  // Seed the RNG deterministically so every node picks the same set for a given round
  // (original `TASK_ID + roundNumber.toString() || "default" + roundNumber` never hit
  // its fallback because string concatenation binds tighter than ||)
  const seed = (TASK_ID || "default") + roundNumber.toString();
  const rng = seedrandom(seed);
  // Use the keys from the submissions object
  const randomKeys = availableKeys.sort(() => rng() - 0.5).slice(0, numberOfNodes);

  console.log("Random keys:", randomKeys);
  return randomKeys;
}

// Helper function that finds the leader for a specific round
async function getLeaderForRound(
  roundNumber: number,
  maxLeaderNumber: number,
  submitterPublicKey: string,
): Promise<{ chosenKey: string | null; leaderNode: string | null }> {
  if (roundNumber <= 0) {
    return { chosenKey: null, leaderNode: null };
  }

  const submissionPublicKeysFrequency: Record<string, number> = {};
  const submissionAuditTriggerKeys = new Set<string>();

  // Aggregate submission frequencies and audit triggers over the previous four rounds
  for (let i = 1; i < 5; i++) {
    const taskSubmissionInfo = await getSubmissionInfo(roundNumber - i);
    console.log({ taskSubmissionInfo });
    if (taskSubmissionInfo) {
      const submissions = taskSubmissionInfo.submissions;
      const frequency = calculatePublicKeyFrequency(submissions);
      Object.assign(submissionPublicKeysFrequency, frequency);

      const auditTriggerKeys = handleAuditTrigger(taskSubmissionInfo.submissions_audit_trigger);
      auditTriggerKeys.forEach((key) => submissionAuditTriggerKeys.add(key));
    }
  }

  const keysNotInAuditTrigger = Object.keys(submissionPublicKeysFrequency).filter(
    (key) => !submissionAuditTriggerKeys.has(key),
  );
  const sortedKeys = keysNotInAuditTrigger.sort(
    (a, b) => submissionPublicKeysFrequency[b] - submissionPublicKeysFrequency[a],
  );

  console.log({ sortedKeys });

  let chosenKey = null;

  const leaderNumber = sortedKeys.length < maxLeaderNumber ? sortedKeys.length : maxLeaderNumber;

  chosenKey = await selectLeaderKey(sortedKeys, leaderNumber, submitterPublicKey, submissionPublicKeysFrequency);

  // Find GitHub username for the chosen key
  for (let i = 1; i < 5; i++) {
    const githubUsername = await fetchRoundSubmissionGitHubRepoOwner(roundNumber - i, chosenKey);
    if (githubUsername) {
      return { chosenKey, leaderNode: githubUsername };
    }
  }

  return { chosenKey, leaderNode: null };
}

export async function getLeaderNode({
  roundNumber,
  leaderNumber = 5,
  submitterPublicKey,
}: {
  roundNumber: number;
  leaderNumber?: number;
  submitterPublicKey: string;
}): Promise<{ isLeader: boolean; leaderNode: string | null }> {
  // Find leader for current round
  const currentLeader = await getLeaderForRound(roundNumber, leaderNumber, submitterPublicKey);
  console.log({ currentLeader });

  if (currentLeader.chosenKey === submitterPublicKey) {
    // If we're the leader, get the leader from 3 rounds ago
    const previousLeader = await getLeaderForRound(roundNumber - 3, leaderNumber, submitterPublicKey);
    console.log({ previousLeader });
    return { isLeader: true, leaderNode: previousLeader.leaderNode };
  }

  // Not the leader, return the current leader's info
  return { isLeader: false, leaderNode: currentLeader.leaderNode };
}

function base58ToNumber(char: string): number {
  const base58Chars = "123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
  return base58Chars.indexOf(char);
}

function knnDistance(a: string, b: string): number {
  if (a.length !== b.length) {
    throw new Error("Strings must be of the same length for KNN distance calculation.");
  }
  // Only the first 30 characters of each key contribute to the distance
  const truncatedA = a.slice(0, 30);
  const truncatedB = b.slice(0, 30);

  let distance = 0;
  for (let i = 0; i < truncatedA.length; i++) {
    const numA = base58ToNumber(truncatedA[i]);
    const numB = base58ToNumber(truncatedB[i]);
    distance += Math.abs(numA - numB);
  }

  return distance;
}
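To make the proximity rule concrete, here is a small sketch using the exported `selectShortestDistance` (the keys are made-up, equal-length base58 strings; real staking keys are longer, and only their first 30 characters contribute to the distance):

```ts
import { selectShortestDistance } from "./leader";

// Hypothetical candidate keys. The distance is the sum, per position, of
// |index(charA) - index(charB)| over the base58 alphabet, so an identical
// key wins with distance 0.
const candidates = ["2a2a2a2a2a", "2a2a2a2a2b", "3b3b3b3b3b"];

selectShortestDistance(candidates, "2a2a2a2a2a").then((closest) => {
  console.log("Closest key:", closest); // "2a2a2a2a2a"
});
```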
40
worker/src/utils/submissionJSONSignatureDecode.ts
Normal file
@ -0,0 +1,40 @@
import { TASK_ID } from "@_koii/namespace-wrapper";
import { getFile } from "./ipfs";
import { Submission } from "@_koii/namespace-wrapper/dist/types";
import { Submitter } from "@_koii/task-manager/dist/types/global";
import { namespaceWrapper } from "@_koii/namespace-wrapper";
export async function submissionJSONSignatureDecode({
  submission_value,
  submitterPublicKey,
  roundNumber,
}: {
  submission_value: string;
  submitterPublicKey: string;
  roundNumber: number;
}) {
  let submissionString;
  try {
    console.log("Getting file from IPFS", submission_value);
    submissionString = await getFile(submission_value);
    console.log("submissionString", submissionString);
  } catch (error) {
    console.log("error", error);
    console.error("INVALID SIGNATURE DATA");
    return null;
  }
  // verify the signature of the submission
  const submission = JSON.parse(submissionString);
  console.log("submission", submission);
  const signaturePayload = await namespaceWrapper.verifySignature(submission.signature, submitterPublicKey);
  if (!signaturePayload.data) {
    console.error("INVALID SIGNATURE");
    return null;
  }
  const data = JSON.parse(signaturePayload.data);
  console.log("signaturePayload", signaturePayload);
  console.log("data", data);
  // The decoded payload must match this task, round, and submitter, and carry a PR URL
  if (
    data.taskId !== TASK_ID ||
    data.roundNumber !== roundNumber ||
    data.stakingKey !== submitterPublicKey ||
    !data.pubKey ||
    !data.prUrl
  ) {
    console.error("INVALID SIGNATURE DATA");
    return null;
  }
  return data;
}
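For reference, a sketch of the signed payload shape this decoder accepts; the field names come from the checks above, while the values are placeholders:

```ts
// Hypothetical payload a worker signs and pins to IPFS. Every field below is
// validated by submissionJSONSignatureDecode before the data is accepted.
const examplePayload = {
  taskId: "BXbYKFdXZhQgEaMFbeShaisQBYG1FD4MiSf9gg4n6mVn", // must equal TASK_ID
  roundNumber: 1, // must match the audited round
  stakingKey: "<submitter staking key>", // must match the submitter's key
  pubKey: "<submitter public key>",
  prUrl: "https://github.com/test_owner/test_repo/pull/1",
};
```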
68
worker/tests/README.md
Normal file
@ -0,0 +1,68 @@
# Summarizer Task Tests

This directory contains end-to-end tests for the summarizer task using the Prometheus test framework.

## Structure

```
tests/
├── config.yaml     # Test configuration
├── workers.json    # Worker configuration
├── data/           # Test data
│   ├── todos.json  # Sample todo items
│   └── issues.json # Sample issues
├── stages/         # Test stages implementation
├── e2e.py          # Test runner script
└── steps.py        # Test steps definition
```

## Prerequisites

1. Install the test framework:

```bash
pip install -e test-framework/
```

2. Set up environment variables in `.env`:

```
ANTHROPIC_API_KEY=your_test_key
GITHUB_USERNAME=your_test_username
GITHUB_TOKEN=your_test_token
```

## Running Tests

To run the tests:

```bash
python -m tests.e2e
```

To force reset databases before running:

```bash
python -m tests.e2e --reset
```

## Test Flow

1. API Key Validation
   - Validates the Anthropic API key

2. GitHub Validation
   - Validates GitHub credentials

3. Todo Management
   - Fetches todos for each worker
   - Generates summaries
   - Submits results

4. Audit Process
   - Workers audit each other's submissions

## Adding New Tests

1. Create a new stage in `stages/`
2. Add the stage to `stages/__init__.py`
3. Add a test step in `steps.py`
4. Update test data in `data/` if needed
12
worker/tests/config.ts
Normal file
@ -0,0 +1,12 @@
import "dotenv/config";

export const TASK_ID =
  process.env.TASK_ID || "BXbYKFdXZhQgEaMFbeShaisQBYG1FD4MiSf9gg4n6mVn";
export const WEBPACKED_FILE_PATH =
  process.env.WEBPACKED_FILE_PATH || "../dist/main.js";

const envKeywords = process.env.TEST_KEYWORDS ?? "";

export const TEST_KEYWORDS = envKeywords
  ? envKeywords.split(",")
  : ["TEST", "EZ TESTING"];
16
worker/tests/config.yaml
Normal file
@ -0,0 +1,16 @@
task_id: "summarizer"
base_port: 5000
max_rounds: 3

data_dir: data
workers_config: workers.json

mongodb:
  database: summarizer_test
  collections:
    todos:
      data_file: todos.json
      required_count: 1
    issues:
      data_file: issues.json
      required_count: 1
16
worker/tests/data/issues.json
Normal file
@ -0,0 +1,16 @@
[
  {
    "taskId": "summarizer",
    "githubUrl": "https://github.com/test_owner/test_repo/issues/1",
    "title": "Test Issue 1",
    "body": "This is a test issue for summarization",
    "status": "open"
  },
  {
    "taskId": "summarizer",
    "githubUrl": "https://github.com/test_owner/test_repo/issues/2",
    "title": "Test Issue 2",
    "body": "This is another test issue for summarization",
    "status": "open"
  }
]
20
worker/tests/data/todos.json
Normal file
@ -0,0 +1,20 @@
[
  {
    "taskId": "summarizer",
    "roundNumber": 1,
    "repo_owner": "test_owner",
    "repo_name": "test_repo",
    "prUrl": "https://github.com/test_owner/test_repo/pull/1",
    "status": "pending",
    "stakingKey": "test_key_1"
  },
  {
    "taskId": "summarizer",
    "roundNumber": 1,
    "repo_owner": "test_owner",
    "repo_name": "test_repo",
    "prUrl": "https://github.com/test_owner/test_repo/pull/2",
    "status": "pending",
    "stakingKey": "test_key_2"
  }
]
112
worker/tests/debugger.ts
Normal file
@ -0,0 +1,112 @@
import "dotenv/config";
import os from "os";
import path from "path";
import { Connection, PublicKey } from "@_koii/web3.js";
import { borsh_bpf_js_deserialize } from "./wasm/bincode_js.cjs";
import { TASK_ID, WEBPACKED_FILE_PATH, TEST_KEYWORDS } from "./config";

class Debugger {
  /*
    Create a .env file with the following variables, or directly input the values to be used in live-debugging mode.
  */
  static taskID = TASK_ID;
  static webpackedFilePath = WEBPACKED_FILE_PATH;
  static keywords = TEST_KEYWORDS;
  static nodeDir: string;

  static async getConfig() {
    Debugger.nodeDir = await this.getNodeDirectory();

    let destinationPath = "executables/" + (await this.getAuditProgram()) + ".js";
    let logPath = "namespace/" + TASK_ID + "/task.log";

    console.log("Debugger.nodeDir", Debugger.nodeDir);

    return {
      webpackedFilePath: Debugger.webpackedFilePath,
      destinationPath: destinationPath,
      keywords: Debugger.keywords,
      logPath: logPath,
      nodeDir: Debugger.nodeDir,
      taskID: Debugger.taskID,
    };
  }

  static async getNodeDirectory() {
    if (Debugger.nodeDir) {
      return Debugger.nodeDir;
    }
    const homeDirectory = os.homedir();
    let nodeDirectory: string;

    switch (os.platform()) {
      case "linux":
        nodeDirectory = path.join(homeDirectory, ".config", "KOII-Desktop-Node");
        break;
      case "darwin":
        nodeDirectory = path.join(homeDirectory, "Library", "Application Support", "KOII-Desktop-Node");
        break;
      default:
        // Windows is the default
        nodeDirectory = path.join(homeDirectory, "AppData", "Roaming", "KOII-Desktop-Node");
    }

    return nodeDirectory;
  }

  static async getAuditProgram() {
    const connection = new Connection("https://mainnet.koii.network");
    const taskId = Debugger.taskID;
    const accountInfo = await connection.getAccountInfo(new PublicKey(taskId));
    if (!accountInfo?.data) {
      console.log(`${taskId} doesn't contain any distribution list data`);
      return null;
    }
    let data;
    const owner = accountInfo.owner.toBase58();
    if (owner === "Koiitask22222222222222222222222222222222222") {
      data = JSON.parse(accountInfo.data.toString());
    } else if (owner === "KPLTRVs6jA7QTthuJH2cEmyCEskFbSV2xpZw46cganN") {
      const buffer = accountInfo.data;
      data = borsh_bpf_js_deserialize(buffer);
      data = parseTaskState(data);
    } else {
      console.error(`Not a valid Task ID ${taskId}`);
      return null;
    }

    console.log("data.task_audit_program", data.task_audit_program);
    return data.task_audit_program;
  }
}

function parseTaskState(taskState: any) {
  taskState.stake_list = objectify(taskState.stake_list, true);
  taskState.ip_address_list = objectify(taskState.ip_address_list, true);
  taskState.distributions_audit_record = objectify(taskState.distributions_audit_record, true);
  taskState.distributions_audit_trigger = objectify(taskState.distributions_audit_trigger, true);
  taskState.submissions = objectify(taskState.submissions, true);
  taskState.submissions_audit_trigger = objectify(taskState.submissions_audit_trigger, true);
  taskState.distribution_rewards_submission = objectify(taskState.distribution_rewards_submission, true);
  taskState.available_balances = objectify(taskState.available_balances, true);
  return taskState;
}

function objectify(data: any, recursive = false) {
  if (data instanceof Map) {
    const obj = Object.fromEntries(data);
    if (recursive) {
      for (const key in obj) {
        if (obj[key] instanceof Map) {
          obj[key] = objectify(obj[key], true);
        } else if (typeof obj[key] === "object" && obj[key] !== null) {
          obj[key] = objectify(obj[key], true);
        }
      }
    }
    return obj;
  }
  return data;
}

export default Debugger;
62
worker/tests/e2e.py
Normal file
@ -0,0 +1,62 @@
"""End-to-end test for the summarizer task."""

from pathlib import Path
from prometheus_test import TestRunner
import dotenv
import argparse
import uuid

from .steps import steps

dotenv.load_dotenv()


def parse_args():
    parser = argparse.ArgumentParser(description="Run summarizer test sequence")
    parser.add_argument(
        "--reset",
        action="store_true",
        help="Force reset of all databases before running tests",
    )
    return parser.parse_args()


def post_load_callback(db):
    """Post-load callback to process MongoDB data after JSON import"""
    # Process todos collection
    todos = list(db.todos.find({"taskId": runner.config.task_id}))
    for todo in todos:
        if "uuid" not in todo:
            todo["uuid"] = str(uuid.uuid4())
            db.todos.replace_one({"_id": todo["_id"]}, todo)

    # Process issues collection
    issues = list(db.issues.find({"taskId": runner.config.task_id}))
    for issue in issues:
        if "uuid" not in issue:
            issue["uuid"] = str(uuid.uuid4())
            db.issues.replace_one({"_id": issue["_id"]}, issue)


# Global reference to the test runner
runner = None


def main():
    global runner
    args = parse_args()

    # Create test runner with config from YAML
    base_dir = Path(__file__).parent
    runner = TestRunner(
        steps=steps,
        config_file=base_dir / "config.yaml",
        config_overrides={"post_load_callback": post_load_callback},
    )

    # Run test sequence
    runner.run(force_reset=args.reset)


if __name__ == "__main__":
    main()
188
worker/tests/main.test.ts
Normal file
@ -0,0 +1,188 @@
import { initializeTaskManager, taskRunner } from "@_koii/task-manager";
import { setup } from "../src/task/0-setup";
import { task } from "../src/task/1-task";
import { submission } from "../src/task/2-submission";
import { audit } from "../src/task/3-audit";
import { distribution } from "../src/task/4-distribution";
import { routes } from "../src/task/5-routes";
import { namespaceWrapper, _server } from "@_koii/task-manager/namespace-wrapper";
import Joi from "joi";
import axios from "axios";
import { Submitter } from "@_koii/task-manager";

beforeAll(async () => {
  await namespaceWrapper.defaultTaskSetup();
  initializeTaskManager({
    setup,
    task,
    submission,
    audit,
    distribution,
    routes,
  });
});

describe("Performing the task", () => {
  it("should perform the core logic task", async () => {
    const round = 1;
    await taskRunner.task(round);
    const value = await namespaceWrapper.storeGet("value");
    expect(value).toBeDefined();
    expect(value).not.toBeNull();
  });

  it("should make the submission to k2 for dummy round 1", async () => {
    const round = 1;
    await taskRunner.submitTask(round);
    const taskState = await namespaceWrapper.getTaskState({});
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            submission_value: Joi.string().required(),
            slot: Joi.number().integer().required(),
            round: Joi.number().integer().required(),
          }),
        ),
      )
      .required()
      .min(1);
    const validationResult = schema.validate(taskState?.submissions);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submission doesn't exist or is incorrect");
    }
  });

  it("should make an audit on submission", async () => {
    const round = 1;
    await taskRunner.auditTask(round);
    const taskState = await namespaceWrapper.getTaskState({});
    console.log("TASK STATE", taskState);
    console.log("audit task", taskState?.submissions_audit_trigger);
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            trigger_by: Joi.string().required(),
            slot: Joi.number().integer().required(),
            votes: Joi.array().required(),
          }),
        ),
      )
      .required();
    const validationResult = schema.validate(taskState?.submissions_audit_trigger);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submission audit is incorrect");
    }
  });

  it("should make the distribution submission to k2 for dummy round 1", async () => {
    const round = 1;
    await taskRunner.submitDistributionList(round);

    const taskState = await namespaceWrapper.getTaskState({});
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            submission_value: Joi.string().required(),
            slot: Joi.number().integer().required(),
            round: Joi.number().integer().required(),
          }),
        ),
      )
      .required()
      .min(1);
    console.log("Distribution submission", taskState?.distribution_rewards_submission);
    const validationResult = schema.validate(taskState?.distribution_rewards_submission);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Distribution submission doesn't exist or is incorrect");
    }
  });

  it("should make an audit on distribution submission", async () => {
    const round = 1;
    await taskRunner.auditDistribution(round);
    const taskState = await namespaceWrapper.getTaskState({});
    console.log("audit task", taskState?.distributions_audit_trigger);
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            trigger_by: Joi.string().required(),
            slot: Joi.number().integer().required(),
            votes: Joi.array().required(),
          }),
        ),
      )
      .required();
    const validationResult = schema.validate(taskState?.distributions_audit_trigger);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Distribution audit is incorrect");
    }
  });

  it("should make sure the submitted distribution list is valid", async () => {
    const round = 1;
    const distributionList = await namespaceWrapper.getDistributionList("", round);
    console.log("Generated distribution List", JSON.parse(distributionList.toString()));
    const schema = Joi.object().pattern(Joi.string().required(), Joi.number().integer().required()).required();
    const validationResult = schema.validate(JSON.parse(distributionList.toString()));
    console.log(validationResult);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submitted distribution list is not valid");
    }
  });

  it("should test the endpoint", async () => {
    const response = await axios.get("http://localhost:3000");
    expect(response.status).toBe(200);
    expect(response.data).toEqual({ message: "Running", status: 200 });
  });

  it("should generate an empty distribution list when there are no submissions", async () => {
    const submitters: Submitter[] = [];
    const bounty = Math.floor(Math.random() * 1e15) + 1;
    const roundNumber = Math.floor(Math.random() * 1e5) + 1;
    const distributionList = await distribution(submitters, bounty, roundNumber);
    expect(distributionList).toEqual({});
  });

  it("should generate a distribution list that contains all the submitters", async () => {
    const simulatedSubmitters = 5;
    const submitters: Submitter[] = [];
    // 10k is the rough maximum number of submitters
    for (let i = 0; i < simulatedSubmitters; i++) {
      const publicKey = `mockPublicKey${i}`;
      submitters.push({
        publicKey,
        votes: Math.floor(Math.random() * simulatedSubmitters) - 5000,
        stake: Math.floor(Math.random() * 1e9) + 1,
      });
    }
    const bounty = Math.floor(Math.random() * 1e15) + 1;
    const roundNumber = 1;
    const distributionList = await distribution(submitters, bounty, roundNumber);
    expect(Object.keys(distributionList).length).toBe(submitters.length);
    expect(Object.keys(distributionList).sort()).toEqual(submitters.map((submitter) => submitter.publicKey).sort());
  });
});

afterAll(async () => {
  _server.close();
});
110
worker/tests/prod-debug.ts
Normal file
@ -0,0 +1,110 @@
import { spawn } from "cross-spawn";
import fs from "fs";
import Debugger from "./debugger";
import { Tail } from "tail";
import path from "path";
import chalk from "chalk";
import dotenv from "dotenv";

dotenv.config();

function startWatching(): void {
  console.log("Watching for file changes...");
  // watch and trigger builds
  build();
}

/* build and webpack the task */
function build(): void {
  console.log("Building...");
  const child = spawn("npm", ["run", "webpack:test"], {
    stdio: "inherit",
  });

  child.on("close", (code: number) => {
    if (code !== 0) {
      console.error("Build failed");
    } else {
      console.log("Build successful");
      copyWebpackedFile();
    }
    return;
  });
}

/* copy the task to the Desktop Node runtime folder */
async function copyWebpackedFile(): Promise<void> {
  const debugConfig = await Debugger.getConfig();
  console.log("debugConfig", debugConfig);
  const nodeDIR = debugConfig.nodeDir;
  const sourcePath = path.join(__dirname, debugConfig.webpackedFilePath);
  const desktopNodeExecutablePath = path.join(nodeDIR, debugConfig.destinationPath);
  const desktopNodeLogPath = path.join(nodeDIR, debugConfig.logPath);
  const keywords = debugConfig.keywords;
  const taskID = debugConfig.taskID;

  if (!sourcePath || !desktopNodeExecutablePath) {
    console.error("Source path or destination path not specified in .env");
    return;
  }

  console.log(`Copying webpacked file from ${sourcePath} to ${desktopNodeExecutablePath}...`);

  fs.copyFile(sourcePath, desktopNodeExecutablePath, async (err) => {
    if (err) {
      console.error("Error copying file:", err);
    } else {
      console.log("File copied successfully");
      tailLogs(desktopNodeLogPath, keywords, taskID);
    }
  });
}

/* tail logs */
async function tailLogs(desktopNodeLogPath: string, keywords: string[], taskID: string): Promise<void> {
  console.log("Watching logs for messages containing ", keywords);

  // Extract the directory path from the full log file path
  const dirPath = path.dirname(desktopNodeLogPath);

  // Check that the task directory exists; exit with guidance if it doesn't
  try {
    await fs.promises.access(dirPath, fs.constants.F_OK);
  } catch (dirErr) {
    console.log(
      "Unable to find task directory. Please make sure you have the correct task ID set in your .env file, and run the task on the Desktop Node before running prod-debug.",
    );
    process.exit(1);
  }

  // Ensure the log file exists, or create it if it doesn't
  try {
    await fs.promises.access(desktopNodeLogPath, fs.constants.F_OK);
  } catch (err) {
    console.log(`Log file not found, creating ${desktopNodeLogPath}`);
    await fs.promises.writeFile(desktopNodeLogPath, "", { flag: "a" }); // 'a' flag ensures the file is created if it doesn't exist and not overwritten if it exists
  }

  let tail = new Tail(desktopNodeLogPath, {
    separator: "\n",
    flushAtEOF: true,
  });

  console.warn(
    `Now watching logs for messages containing ${keywords.join(",")}. Please start the task ${taskID} and keep it running on the Desktop Node.`,
  );

  tail.on("line", (data: string) => {
    if (keywords.some((keyword) => data.includes(keyword))) {
      console.log(chalk.magenta(data));
    } else {
      console.log(data);
    }
  });

  tail.on("error", (error: Error) => {
    console.log("ERROR: ", error);
  });
}

startWatching();
84
worker/tests/simulateTask.ts
Normal file
@ -0,0 +1,84 @@
import { taskRunner } from "@_koii/task-manager";

import "../src/index.js";
import { namespaceWrapper } from "@_koii/task-manager/namespace-wrapper";
import { Keypair } from "@_koii/web3.js";

const numRounds = parseInt(process.argv[2]) || 1;
const roundDelay = parseInt(process.argv[3]) || 5000;
const functionDelay = parseInt(process.argv[4]) || 1000;

let TASK_TIMES: number[] = [];
let SUBMISSION_TIMES: number[] = [];
let AUDIT_TIMES: number[] = [];

function sleep(ms: number) {
  return new Promise((resolve) => setTimeout(resolve, ms));
}

async function executeTasks() {
  const keypair = Keypair.generate();
  await namespaceWrapper.stakeOnChain(keypair.publicKey, keypair, keypair.publicKey, 10000);
  for (let round = 0; round < numRounds; round++) {
    const taskStartTime = Date.now();
    await taskRunner.task(round);
    const taskEndTime = Date.now();
    TASK_TIMES.push(taskEndTime - taskStartTime);
    await sleep(functionDelay);

    const taskSubmissionStartTime = Date.now();
    await taskRunner.submitTask(round);
    const taskSubmissionEndTime = Date.now();
    SUBMISSION_TIMES.push(taskSubmissionEndTime - taskSubmissionStartTime);
    await sleep(functionDelay);

    const auditStartTime = Date.now();
    await taskRunner.auditTask(round);
    const auditEndTime = Date.now();
    AUDIT_TIMES.push(auditEndTime - auditStartTime);
    await sleep(functionDelay);

    await taskRunner.selectAndGenerateDistributionList(round);
    await sleep(functionDelay);

    await taskRunner.auditDistribution(round);

    if (round < numRounds - 1) {
      await sleep(roundDelay);
    }
  }
  console.log("TIME METRICS BELOW");
  function metrics(name: string, times: number[]) {
    const average = (arr: number[]) => arr.reduce((a, b) => a + b, 0) / arr.length;
    const formatTime = (ms: number) => (ms / 1000).toFixed(4);
    const formatSlot = (ms: number) => Math.ceil(ms / 408);
    const min = Math.min(...times);
    const max = Math.max(...times);
    const avg = average(times);
    const timeMin = formatTime(min);
    const timeMax = formatTime(max);
    const timeAvg = formatTime(avg);
    const slotMin = formatSlot(min);
    const slotMax = formatSlot(max);
    const slotAvg = formatSlot(avg);

    return {
      Metric: `SIMULATED ${name} WINDOW`,
      "Avg Time (s)": timeAvg,
      "Avg Slots": slotAvg,
      "Min Time (s)": timeMin,
      "Min Slots": slotMin,
      "Max Time (s)": timeMax,
      "Max Slots": slotMax,
    };
  }
  const timeMetrics = metrics("TASK", TASK_TIMES);
  const submissionMetrics = metrics("SUBMISSION", SUBMISSION_TIMES);
  const auditMetrics = metrics("AUDIT", AUDIT_TIMES);

  console.table([timeMetrics, submissionMetrics, auditMetrics]);

  console.log("All tasks executed. Test completed.");
  process.exit(0);
}
setTimeout(executeTasks, 1500);
0
worker/tests/stages/__init__.py
Normal file
51
worker/tests/stages/audit_summary.py
Normal file
@ -0,0 +1,51 @@
"""Test stage for auditing summary."""

import requests
from prometheus_test import Context


async def prepare(context: Context, target_name: str):
    """Prepare for auditing summary."""
    staking_key = context.env.get("WORKER_ID")
    target_submission = await context.storeGet(f"submission-{target_name}")

    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "target_submission": target_submission,
        "target_name": target_name,
    }


async def execute(context: Context, prepare_data: dict):
    """Execute summary audit test."""
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    target_submission = prepare_data["target_submission"]
    target_name = prepare_data["target_name"]

    # Mock response for audit
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/audit",
        json={
            "taskId": context.config.task_id,
            "roundNumber": round_number,
            "stakingKey": staking_key,
            "submitterKey": target_name,
            "cid": target_submission.get("cid"),
            "prUrl": target_submission.get("pr_url"),
            "githubUsername": target_submission.get("github_username"),
        },
    )

    if response.status_code != 200:
        raise Exception(f"Failed to audit summary: {response.text}")

    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to audit summary")

    # Store audit result
    await context.storeSet(f"audit-{staking_key}-{target_name}", result.get("data"))

    return True
39
worker/tests/stages/fetch_summarizer_todo.py
Normal file
@ -0,0 +1,39 @@
"""Test stage for fetching summarizer todo."""

import requests
from prometheus_test import Context


async def prepare(context: Context):
    """Prepare for fetching summarizer todo."""
    return {
        "staking_key": context.env.get("WORKER_ID"),
        "round_number": context.round_number,
    }


async def execute(context: Context, prepare_data: dict):
    """Execute fetch summarizer todo test."""
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]

    # Mock response for fetching todo
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/fetch-summarizer-todo",
        json={
            "stakingKey": staking_key,
            "roundNumber": round_number,
        },
    )

    if response.status_code != 200:
        raise Exception(f"Failed to fetch summarizer todo: {response.text}")

    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to fetch summarizer todo")

    # Store todo data for next steps
    await context.storeSet(f"todo-{staking_key}", result.get("data"))

    return True
47
worker/tests/stages/generate_summary.py
Normal file
@ -0,0 +1,47 @@
"""Test stage for generating repository summary."""

import requests
from prometheus_test import Context


async def prepare(context: Context):
    """Prepare for generating summary."""
    staking_key = context.env.get("WORKER_ID")
    todo = await context.storeGet(f"todo-{staking_key}")

    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "repo_owner": todo.get("repo_owner"),
        "repo_name": todo.get("repo_name"),
    }


async def execute(context: Context, prepare_data: dict):
    """Execute summary generation test."""
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    repo_owner = prepare_data["repo_owner"]
    repo_name = prepare_data["repo_name"]

    # Mock response for repo summary generation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/generate-summary",
        json={
            "taskId": context.config.task_id,
            "round_number": str(round_number),
            "repo_url": f"https://github.com/{repo_owner}/{repo_name}",
        },
    )

    if response.status_code != 200:
        raise Exception(f"Failed to generate summary: {response.text}")

    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to generate summary")

    # Store PR URL for next steps
    await context.storeSet(f"pr-{staking_key}", result.get("data", {}).get("pr_url"))

    return True
56
worker/tests/stages/submit_summary.py
Normal file
@ -0,0 +1,56 @@
"""Test stage for submitting summary."""

import requests
from prometheus_test import Context


async def prepare(context: Context):
    """Prepare for submitting summary."""
    staking_key = context.env.get("WORKER_ID")
    pr_url = await context.storeGet(f"pr-{staking_key}")

    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "pr_url": pr_url,
        "github_username": context.env.get("GITHUB_USERNAME"),
    }


async def execute(context: Context, prepare_data: dict):
    """Execute summary submission test."""
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    pr_url = prepare_data["pr_url"]
    github_username = prepare_data["github_username"]

    # Mock response for submission
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/submit",
        json={
            "taskId": context.config.task_id,
            "roundNumber": round_number,
            "prUrl": pr_url,
            "stakingKey": staking_key,
            "githubUsername": github_username,
        },
    )

    if response.status_code != 200:
        raise Exception(f"Failed to submit summary: {response.text}")

    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to submit summary")

    # Store submission data for audit
    await context.storeSet(
        f"submission-{staking_key}",
        {
            "cid": result.get("data", {}).get("cid"),
            "pr_url": pr_url,
            "github_username": github_username,
        },
    )

    return True
31
worker/tests/stages/validate_api_keys.py
Normal file
@ -0,0 +1,31 @@
"""Test stage for validating API keys."""

import requests
from prometheus_test import Context


async def prepare(context: Context):
    """Prepare for API key validation test."""
    return {
        "api_key": context.env.get("ANTHROPIC_API_KEY"),
    }


async def execute(context: Context, prepare_data: dict):
    """Execute API key validation test."""
    api_key = prepare_data["api_key"]

    # Mock response for Anthropic API validation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/validate-api-key",
        json={"api_key": api_key},
    )

    if response.status_code != 200:
        raise Exception(f"API key validation failed: {response.text}")

    result = response.json()
    if not result.get("valid"):
        raise Exception("API key is not valid")

    return True
33
worker/tests/stages/validate_github.py
Normal file
@ -0,0 +1,33 @@
"""Test stage for validating GitHub credentials."""

import requests
from prometheus_test import Context


async def prepare(context: Context):
    """Prepare for GitHub validation test."""
    return {
        "github_username": context.env.get("GITHUB_USERNAME"),
        "github_token": context.env.get("GITHUB_TOKEN"),
    }


async def execute(context: Context, prepare_data: dict):
    """Execute GitHub validation test."""
    username = prepare_data["github_username"]
    token = prepare_data["github_token"]

    # Mock response for GitHub validation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/validate-github",
        json={"username": username, "token": token},
    )

    if response.status_code != 200:
        raise Exception(f"GitHub validation failed: {response.text}")

    result = response.json()
    if not result.get("valid"):
        raise Exception("GitHub credentials are not valid")

    return True
85
worker/tests/steps.py
Normal file
@ -0,0 +1,85 @@
"""Test step definitions."""

from prometheus_test import TestStep
from functools import partial
from .stages import (
    validate_api_keys,
    validate_github,
    fetch_summarizer_todo,
    generate_summary,
    submit_summary,
    audit_summary,
)

steps = [
    TestStep(
        name="validate_api_keys",
        description="Validate Anthropic API key",
        prepare=validate_api_keys.prepare,
        execute=validate_api_keys.execute,
        worker="worker1",
    ),
    TestStep(
        name="validate_github",
        description="Validate GitHub credentials",
        prepare=validate_github.prepare,
        execute=validate_github.execute,
        worker="worker1",
    ),
    TestStep(
        name="fetch_todo_worker1",
        description="Fetch summarizer todo for worker1",
        prepare=fetch_summarizer_todo.prepare,
        execute=fetch_summarizer_todo.execute,
        worker="worker1",
    ),
    TestStep(
        name="fetch_todo_worker2",
        description="Fetch summarizer todo for worker2",
        prepare=fetch_summarizer_todo.prepare,
        execute=fetch_summarizer_todo.execute,
        worker="worker2",
    ),
    TestStep(
        name="generate_summary_worker1",
        description="Generate summary for worker1's todo",
        prepare=generate_summary.prepare,
        execute=generate_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="generate_summary_worker2",
        description="Generate summary for worker2's todo",
        prepare=generate_summary.prepare,
        execute=generate_summary.execute,
        worker="worker2",
    ),
    TestStep(
        name="submit_summary_worker1",
        description="Submit summary for worker1",
        prepare=submit_summary.prepare,
        execute=submit_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="submit_summary_worker2",
        description="Submit summary for worker2",
        prepare=submit_summary.prepare,
        execute=submit_summary.execute,
        worker="worker2",
    ),
    TestStep(
        name="audit_worker1",
        description="Worker1 audits Worker2's submission",
        prepare=partial(audit_summary.prepare, target_name="worker2"),
        execute=audit_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="audit_worker2",
        description="Worker2 audits Worker1's submission",
        prepare=partial(audit_summary.prepare, target_name="worker1"),
        execute=audit_summary.execute,
        worker="worker2",
    ),
]
worker/tests/test.ts
Normal file
19
worker/tests/test.ts
Normal file
@ -0,0 +1,19 @@
|
||||
// async function testSlackWebhook(){
|
||||
// const slackResponse = await fetch('https://hooks.slack.com/services/', {
|
||||
// method: "POST",
|
||||
// headers: {
|
||||
// "Content-Type": "application/json",
|
||||
// },
|
||||
// body: JSON.stringify({
|
||||
// text: `[TASK] Error summarizing issue:\n ${JSON.stringify({
|
||||
// status: "error",
|
||||
// data: {
|
||||
// message: "test"
|
||||
// }
|
||||
// })}`
|
||||
// }),
|
||||
// });
|
||||
// console.log("[TASK] slackResponse: ", slackResponse);
|
||||
// }
|
||||
|
||||
// testSlackWebhook();
|
1211
worker/tests/wasm/bincode_js.cjs
Normal file
File diff suppressed because it is too large
225
worker/tests/wasm/bincode_js.d.ts
vendored
Normal file
225
worker/tests/wasm/bincode_js.d.ts
vendored
Normal file
@ -0,0 +1,225 @@
|
||||
/* tslint:disable */
|
||||
/* eslint-disable */
|
||||
/**
|
||||
* @param {any} val
|
||||
* @returns {any}
|
||||
*/
|
||||
export function bincode_js_deserialize(val: any): any;
|
||||
/**
|
||||
* @param {any} val
|
||||
* @returns {any}
|
||||
*/
|
||||
export function borsh_bpf_js_deserialize(val: any): any;
|
||||
/**
|
||||
* Initialize Javascript logging and panic handler
|
||||
*/
|
||||
export function solana_program_init(): void;
|
||||
/**
|
||||
* A hash; the 32-byte output of a hashing algorithm.
|
||||
*
|
||||
* This struct is used most often in `solana-sdk` and related crates to contain
|
||||
* a [SHA-256] hash, but may instead contain a [blake3] hash, as created by the
|
||||
* [`blake3`] module (and used in [`Message::hash`]).
|
||||
*
|
||||
* [SHA-256]: https://en.wikipedia.org/wiki/SHA-2
|
||||
* [blake3]: https://github.com/BLAKE3-team/BLAKE3
|
||||
* [`blake3`]: crate::blake3
|
||||
* [`Message::hash`]: crate::message::Message::hash
|
||||
*/
|
||||
export class Hash {
|
||||
free(): void;
|
||||
/**
|
||||
* Create a new Hash object
|
||||
*
|
||||
* * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]`
|
||||
* @param {any} value
|
||||
*/
|
||||
constructor(value: any);
|
||||
/**
|
||||
* Return the base58 string representation of the hash
|
||||
* @returns {string}
|
||||
*/
|
||||
toString(): string;
|
||||
/**
|
||||
* Checks if two `Hash`s are equal
|
||||
* @param {Hash} other
|
||||
* @returns {boolean}
|
||||
*/
|
||||
equals(other: Hash): boolean;
|
||||
/**
|
||||
* Return the `Uint8Array` representation of the hash
|
||||
* @returns {Uint8Array}
|
||||
*/
|
||||
toBytes(): Uint8Array;
|
||||
}
|
||||
/**
|
||||
* A directive for a single invocation of a Solana program.
|
||||
*
|
||||
* An instruction specifies which program it is calling, which accounts it may
|
||||
* read or modify, and additional data that serves as input to the program. One
|
||||
* or more instructions are included in transactions submitted by Solana
|
||||
* clients. Instructions are also used to describe [cross-program
|
||||
* invocations][cpi].
|
||||
*
|
||||
* [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs
|
||||
*
|
||||
* During execution, a program will receive a list of account data as one of
|
||||
* its arguments, in the same order as specified during `Instruction`
|
||||
* construction.
|
||||
*
|
||||
* While Solana is agnostic to the format of the instruction data, it has
|
||||
* built-in support for serialization via [`borsh`] and [`bincode`].
|
||||
*
|
||||
* [`borsh`]: https://docs.rs/borsh/latest/borsh/
|
||||
* [`bincode`]: https://docs.rs/bincode/latest/bincode/
|
||||
*
|
||||
* # Specifying account metadata
|
||||
*
|
||||
* When constructing an [`Instruction`], a list of all accounts that may be
|
||||
* read or written during the execution of that instruction must be supplied as
|
||||
* [`AccountMeta`] values.
|
||||
*
|
||||
* Any account whose data may be mutated by the program during execution must
|
||||
* be specified as writable. During execution, writing to an account that was
|
||||
* not specified as writable will cause the transaction to fail. Writing to an
|
||||
* account that is not owned by the program will cause the transaction to fail.
|
||||
*
|
||||
* Any account whose lamport balance may be mutated by the program during
|
||||
* execution must be specified as writable. During execution, mutating the
|
||||
* lamports of an account that was not specified as writable will cause the
|
||||
* transaction to fail. While _subtracting_ lamports from an account not owned
|
||||
* by the program will cause the transaction to fail, _adding_ lamports to any
|
||||
* account is allowed, as long is it is mutable.
|
||||
*
|
||||
* Accounts that are not read or written by the program may still be specified
|
||||
* in an `Instruction`'s account list. These will affect scheduling of program
|
||||
* execution by the runtime, but will otherwise be ignored.
|
||||
*
|
||||
* When building a transaction, the Solana runtime coalesces all accounts used
|
||||
* by all instructions in that transaction, along with accounts and permissions
|
||||
* required by the runtime, into a single account list. Some accounts and
|
||||
* account permissions required by the runtime to process a transaction are
|
||||
* _not_ required to be included in an `Instruction`s account list. These
|
||||
* include:
|
||||
*
|
||||
* - The program ID — it is a separate field of `Instruction`
|
||||
* - The transaction's fee-paying account — it is added during [`Message`]
|
||||
* construction. A program may still require the fee payer as part of the
|
||||
* account list if it directly references it.
|
||||
*
|
||||
* [`Message`]: crate::message::Message
|
||||
*
|
||||
* Programs may require signatures from some accounts, in which case they
|
||||
* should be specified as signers during `Instruction` construction. The
|
||||
* program must still validate during execution that the account is a signer.
|
||||
*/
|
||||
export class Instruction {
|
||||
free(): void;
|
||||
}
|
||||
/**
|
||||
*/
|
||||
export class Instructions {
|
||||
free(): void;
|
||||
/**
|
||||
*/
|
||||
constructor();
|
||||
/**
|
||||
* @param {Instruction} instruction
|
||||
*/
|
||||
push(instruction: Instruction): void;
|
||||
}
|
||||
/**
 * A Solana transaction message (legacy).
 *
 * See the [`message`] module documentation for further description.
 *
 * [`message`]: crate::message
 *
 * Some constructors accept an optional `payer`, the account responsible for
 * paying the cost of executing a transaction. In most cases, callers should
 * specify the payer explicitly in these constructors. In some cases though,
 * the caller is not _required_ to specify the payer, but is still allowed to:
 * in the `Message` structure, the first account is always the fee-payer, so if
 * the caller has knowledge that the first account of the constructed
 * transaction's `Message` is both a signer and the expected fee-payer, then
 * redundantly specifying the fee-payer is not strictly required.
 */
export class Message {
  free(): void;
  /**
   * The id of a recent ledger entry.
   */
  recent_blockhash: Hash;
}
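// Field-access sketch for the class above (assumption: `Hash` offers a
// constructor and `toString()` mirroring `Pubkey`'s, as the `hash_constructor`
// and `hash_toString` exports later in this commit suggest; the all-zero hash
// below is a placeholder):
function resetBlockhashForTest(msg: Message): string {
  msg.recent_blockhash = new Hash(new Uint8Array(32)); // placeholder blockhash
  return msg.recent_blockhash.toString();              // base58 representation
}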
/**
 * The address of a [Solana account][acc].
 *
 * Some account addresses are [ed25519] public keys, with corresponding secret
 * keys that are managed off-chain. Often, though, account addresses do not
 * have corresponding secret keys — as with [_program derived
 * addresses_][pdas] — or the secret key is not relevant to the operation
 * of a program, and may have even been disposed of. As running Solana programs
 * cannot safely create or manage secret keys, the full [`Keypair`] is not
 * defined in `solana-program` but in `solana-sdk`.
 *
 * [acc]: https://docs.solana.com/developing/programming-model/accounts
 * [ed25519]: https://ed25519.cr.yp.to/
 * [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses
 * [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html
 */
export class Pubkey {
  free(): void;
  /**
   * Create a new Pubkey object
   *
   * * `value` - optional public key as a base58 encoded string, `Uint8Array`, or `[number]`
   * @param {any} value
   */
  constructor(value: any);
  /**
   * Return the base58 string representation of the public key
   * @returns {string}
   */
  toString(): string;
  /**
   * Check if a `Pubkey` is on the ed25519 curve.
   * @returns {boolean}
   */
  isOnCurve(): boolean;
  /**
   * Checks if two `Pubkey`s are equal
   * @param {Pubkey} other
   * @returns {boolean}
   */
  equals(other: Pubkey): boolean;
  /**
   * Return the `Uint8Array` representation of the public key
   * @returns {Uint8Array}
   */
  toBytes(): Uint8Array;
  /**
   * Derive a Pubkey from another Pubkey, a string seed, and a program id
   * @param {Pubkey} base
   * @param {string} seed
   * @param {Pubkey} owner
   * @returns {Pubkey}
   */
  static createWithSeed(base: Pubkey, seed: string, owner: Pubkey): Pubkey;
  /**
   * Derive a program address from seeds and a program id
   * @param {any[]} seeds
   * @param {Pubkey} program_id
   * @returns {Pubkey}
   */
  static createProgramAddress(seeds: any[], program_id: Pubkey): Pubkey;
  /**
   * Find a valid program address
   *
   * Returns:
   * * `[Pubkey, number]` - the program address and bump seed
   * @param {any[]} seeds
   * @param {Pubkey} program_id
   * @returns {any}
   */
  static findProgramAddress(seeds: any[], program_id: Pubkey): any;
}
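// Derivation sketch using the methods declared above. The base key and seed
// are placeholders, and seeds-as-UTF-8-bytes for `findProgramAddress` is an
// assumption (the declaration only says `any[]`):
const programId = new Pubkey("11111111111111111111111111111111"); // system program id
const base = new Pubkey(new Uint8Array(32));                      // placeholder key

const derived = Pubkey.createWithSeed(base, "my-seed", programId);
const [pda, bump] = Pubkey.findProgramAddress(
  [new TextEncoder().encode("my-seed")],
  programId,
);
console.log(derived.toString(), pda.toString(), bump); // base58, base58, number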
BIN
worker/tests/wasm/bincode_js_bg.wasm
Normal file
Binary file not shown.
44
worker/tests/wasm/bincode_js_bg.wasm.d.ts
vendored
Normal file
@ -0,0 +1,44 @@
/* tslint:disable */
/* eslint-disable */
export const memory: WebAssembly.Memory;
export function bincode_js_deserialize(a: number): number;
export function borsh_bpf_js_deserialize(a: number): number;
export function __wbg_message_free(a: number): void;
export function __wbg_get_message_recent_blockhash(a: number): number;
export function __wbg_set_message_recent_blockhash(a: number, b: number): void;
export function solana_program_init(): void;
export function systeminstruction_createAccount(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_createAccountWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number): number;
export function systeminstruction_assign(a: number, b: number): number;
export function systeminstruction_assignWithSeed(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_transfer(a: number, b: number, c: number): number;
export function systeminstruction_transferWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number): number;
export function systeminstruction_allocate(a: number, b: number): number;
export function systeminstruction_allocateWithSeed(a: number, b: number, c: number, d: number, e: number, f: number): number;
export function systeminstruction_createNonceAccount(a: number, b: number, c: number, d: number): number;
export function systeminstruction_advanceNonceAccount(a: number, b: number): number;
export function systeminstruction_withdrawNonceAccount(a: number, b: number, c: number, d: number): number;
export function systeminstruction_authorizeNonceAccount(a: number, b: number, c: number): number;
export function __wbg_instruction_free(a: number): void;
export function pubkey_constructor(a: number, b: number): void;
export function pubkey_toString(a: number, b: number): void;
export function pubkey_isOnCurve(a: number): number;
export function pubkey_equals(a: number, b: number): number;
export function pubkey_toBytes(a: number, b: number): void;
export function pubkey_createWithSeed(a: number, b: number, c: number, d: number, e: number): void;
export function pubkey_createProgramAddress(a: number, b: number, c: number, d: number): void;
export function pubkey_findProgramAddress(a: number, b: number, c: number, d: number): void;
export function __wbg_instructions_free(a: number): void;
export function instructions_constructor(): number;
export function instructions_push(a: number, b: number): void;
export function hash_constructor(a: number, b: number): void;
export function hash_toString(a: number, b: number): void;
export function hash_equals(a: number, b: number): number;
export function hash_toBytes(a: number, b: number): void;
export function __wbg_pubkey_free(a: number): void;
export function __wbg_hash_free(a: number): void;
export function __wbindgen_malloc(a: number, b: number): number;
export function __wbindgen_realloc(a: number, b: number, c: number, d: number): number;
export function __wbindgen_add_to_stack_pointer(a: number): number;
export function __wbindgen_free(a: number, b: number, c: number): void;
export function __wbindgen_exn_store(a: number): void;
BIN
worker/tests/wasm/zstd.wasm
Executable file
Binary file not shown.
31
worker/tests/webpack.config.js
Normal file
@ -0,0 +1,31 @@
import path from "path";
import Dotenv from "dotenv-webpack";
import { fileURLToPath } from "url";

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

export default {
  mode: "development",
  entry: "./src/index.ts",
  output: {
    filename: "main.js",
    path: path.resolve(__dirname, "dist"),
    libraryTarget: "commonjs2",
  },
  target: "node",
  resolve: {
    extensions: [".ts", ".js"],
  },
  module: {
    rules: [
      {
        test: /\.ts$/,
        use: "ts-loader",
        exclude: /node_modules/,
      },
    ],
  },
  devtool: "source-map",
  plugins: [new Dotenv()],
};
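For completeness, this config can also be driven programmatically, which is sometimes more convenient in a test harness than shelling out to the CLI. A minimal sketch using webpack's Node API (assumes it runs from `worker/tests/`; the file name and error handling are illustrative):

// build.mjs - bundle the tests entry using webpack's Node API
import webpack from "webpack";
import config from "./webpack.config.js";

webpack(config, (err, stats) => {
  if (err) throw err;                            // fatal webpack failure
  console.log(stats.toString({ colors: true })); // human-readable summary
  if (stats.hasErrors()) process.exitCode = 1;   // fail CI on compile errors
});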
29
worker/tests/workers.json
Normal file
@ -0,0 +1,29 @@
{
  "worker1": {
    "port": 5001,
    "env": {
      "WORKER_ID": "worker1",
      "ANTHROPIC_API_KEY": "test_key",
      "GITHUB_USERNAME": "test_user",
      "GITHUB_TOKEN": "test_token"
    }
  },
  "worker2": {
    "port": 5002,
    "env": {
      "WORKER_ID": "worker2",
      "ANTHROPIC_API_KEY": "test_key",
      "GITHUB_USERNAME": "test_user",
      "GITHUB_TOKEN": "test_token"
    }
  },
  "leader": {
    "port": 5000,
    "env": {
      "WORKER_ID": "leader",
      "ANTHROPIC_API_KEY": "test_key",
      "GITHUB_USERNAME": "test_user",
      "GITHUB_TOKEN": "test_token"
    }
  }
}
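The fixture above describes one leader and two workers with per-process ports and environment variables. A sketch of how a harness might consume it (hypothetical; the entry script `./worker-entry.js` and the `PORT` variable are assumptions, not part of this commit):

// spawn-workers.mjs - launch the processes described in workers.json
import { fork } from "node:child_process";
import { readFileSync } from "node:fs";

const workers = JSON.parse(readFileSync("./workers.json", "utf8"));

for (const [name, { port, env }] of Object.entries(workers)) {
  const child = fork("./worker-entry.js", {
    env: { ...process.env, ...env, PORT: String(port) }, // merge fixture env
  });
  console.log(`started ${name} (pid ${child.pid}) on port ${port}`);
}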
25
worker/tsconfig.json
Normal file
@ -0,0 +1,25 @@
{
  "compilerOptions": {
    /* Language and Environment */
    "target": "es2020",

    /* Modules */
    "module": "CommonJS",
    "rootDir": "./",
    "outDir": "./build",
    "esModuleInterop": true,
    "forceConsistentCasingInFileNames": true,
    "moduleResolution": "Node",

    /* Type Checking */
    "strict": true,
    "skipLibCheck": true,
    "allowJs": true,
    "declaration": false,
    "sourceMap": true,
    "noEmit": false,
    "allowImportingTsExtensions": false
  },
  "include": ["src/**/*", "tests/**/*", "../api/builder/src/orcaSettings.js"],
  "exclude": ["node_modules", "dist", "build"]
}
29
worker/tsconfig.tests.json
Normal file
@ -0,0 +1,29 @@
{
  "compilerOptions": {
    /* Language and Environment */
    "target": "es2020",

    /* Modules */
    "module": "commonjs",
    "moduleResolution": "Node",
    "rootDir": "./tests",
    "esModuleInterop": true,
    "allowSyntheticDefaultImports": true,
    "forceConsistentCasingInFileNames": true,

    /* Type Checking */
    "strict": true,
    "useUnknownInCatchVariables": true,
    "noUnusedLocals": true,
    "noUnusedParameters": true,
    "noImplicitReturns": true,
    "noFallthroughCasesInSwitch": true,
    "noUncheckedIndexedAccess": true,
    "noImplicitOverride": true,
    "noPropertyAccessFromIndexSignature": true,

    /* Completeness */
    "skipLibCheck": true
  },
  "include": ["tests/**/*"]
}
41
worker/webpack.config.mjs
Normal file
@ -0,0 +1,41 @@
import path from "path";
import { fileURLToPath } from "url";

const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

export default {
  mode: "development",
  entry: "./src/index.ts",
  output: {
    filename: "main.js",
    path: path.resolve(__dirname, "dist"),
    libraryTarget: "commonjs2",
    clean: true
  },
  target: "node",

  resolve: {
    extensions: [".ts", ".js"]
  },

  module: {
    rules: [
      {
        test: /\.ts$/,
        use: {
          loader: "ts-loader",
          options: {
            transpileOnly: true
          }
        },
        exclude: /node_modules/
      }
    ]
  },

  plugins: [],
  devtool: "source-map",
};
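Note that `transpileOnly: true` makes ts-loader skip type checking entirely, trading safety for build speed. If type errors should still fail the build, one common pattern is to type-check in a parallel process; a sketch with `fork-ts-checker-webpack-plugin` (an extra dev dependency, not part of this commit):

// webpack.config.mjs (variant) - restore type checking alongside transpileOnly
import ForkTsCheckerWebpackPlugin from "fork-ts-checker-webpack-plugin";

export default {
  // ...same settings as above...
  plugins: [new ForkTsCheckerWebpackPlugin()], // type-checks in a worker process
};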