transfer from monorepo

This commit is contained in:
Laura Abro
2025-04-24 10:24:42 -03:00
parent a2175785d5
commit 1c6fc5540b
95 changed files with 7110 additions and 0 deletions

68
worker/tests/README.md Normal file
View File

@ -0,0 +1,68 @@
# Summarizer Task Tests
This directory contains end-to-end tests for the summarizer task using the Prometheus test framework.
## Structure
```
tests/
├── config.yaml # Test configuration
├── workers.json # Worker configuration
├── data/ # Test data
│ ├── todos.json # Sample todo items
│ └── issues.json # Sample issues
├── stages/ # Test stages implementation
├── e2e.py # Test runner script
└── steps.py # Test steps definition
```
## Prerequisites
1. Install the test framework:
```bash
pip install -e test-framework/
```
2. Set up environment variables in `.env`:
```
ANTHROPIC_API_KEY=your_test_key
GITHUB_USERNAME=your_test_username
GITHUB_TOKEN=your_test_token
```
## Running Tests
To run the tests:
```bash
python -m tests.e2e
```
To force reset databases before running:
```bash
python -m tests.e2e --reset
```
## Test Flow
1. API Key Validation
- Validates Anthropic API key
2. GitHub Validation
- Validates GitHub credentials
3. Todo Management
- Fetches todos for each worker
- Generates summaries
- Submits results
4. Audit Process
- Workers audit each other's submissions
## Adding New Tests
1. Create a new stage in `stages/`
2. Add stage to `stages/__init__.py`
3. Add test step in `steps.py`
4. Update test data in `data/` if needed

12
worker/tests/config.ts Normal file
View File

@ -0,0 +1,12 @@
import "dotenv/config";
// Task ID of the deployed summarizer task; override via the TASK_ID env var.
export const TASK_ID =
  process.env.TASK_ID || "BXbYKFdXZhQgEaMFbeShaisQBYG1FD4MiSf9gg4n6mVn";

// Location of the webpacked bundle, relative to this tests directory.
export const WEBPACKED_FILE_PATH =
  process.env.WEBPACKED_FILE_PATH || "../dist/main.js";

// Keywords to highlight in tailed logs; a comma-separated TEST_KEYWORDS
// env var overrides the built-in defaults.
const rawKeywords = process.env.TEST_KEYWORDS ?? "";
export const TEST_KEYWORDS =
  rawKeywords.length > 0 ? rawKeywords.split(",") : ["TEST", "EZ TESTING"];

16
worker/tests/config.yaml Normal file
View File

@ -0,0 +1,16 @@
# Configuration consumed by the prometheus_test TestRunner (see e2e.py).
task_id: "summarizer"
base_port: 5000          # base HTTP port for worker servers under test
max_rounds: 3
data_dir: data           # directory holding the JSON fixtures below
workers_config: workers.json
mongodb:
  database: summarizer_test
  collections:
    # Each collection is seeded from data_file; required_count is the
    # minimum number of documents expected after import.
    todos:
      data_file: todos.json
      required_count: 1
    issues:
      data_file: issues.json
      required_count: 1

View File

@ -0,0 +1,16 @@
[
{
"taskId": "summarizer",
"githubUrl": "https://github.com/test_owner/test_repo/issues/1",
"title": "Test Issue 1",
"body": "This is a test issue for summarization",
"status": "open"
},
{
"taskId": "summarizer",
"githubUrl": "https://github.com/test_owner/test_repo/issues/2",
"title": "Test Issue 2",
"body": "This is another test issue for summarization",
"status": "open"
}
]

View File

@ -0,0 +1,20 @@
[
{
"taskId": "summarizer",
"roundNumber": 1,
"repo_owner": "test_owner",
"repo_name": "test_repo",
"prUrl": "https://github.com/test_owner/test_repo/pull/1",
"status": "pending",
"stakingKey": "test_key_1"
},
{
"taskId": "summarizer",
"roundNumber": 1,
"repo_owner": "test_owner",
"repo_name": "test_repo",
"prUrl": "https://github.com/test_owner/test_repo/pull/2",
"status": "pending",
"stakingKey": "test_key_2"
}
]

112
worker/tests/debugger.ts Normal file
View File

@ -0,0 +1,112 @@
import "dotenv/config";
import os from "os";
import path from "path";
import { Connection, PublicKey } from "@_koii/web3.js";
import { borsh_bpf_js_deserialize } from "./wasm/bincode_js.cjs";
import { TASK_ID, WEBPACKED_FILE_PATH, TEST_KEYWORDS } from "./config";
/**
 * Helper for live-debugging a deployed task on the KOII Desktop Node:
 * resolves the node's data directory, the destination path for the
 * webpacked bundle, and the task's audit-program id from chain state.
 */
class Debugger {
  /*
  Create .env file with following variables or directly input values to be used in live-debugging mode.
  */
  static taskID = TASK_ID;
  static webpackedFilePath = WEBPACKED_FILE_PATH;
  static keywords = TEST_KEYWORDS;
  // Desktop Node data directory; resolved lazily by getNodeDirectory().
  static nodeDir: string;

  /** Assemble all paths/settings used by the prod-debug pipeline. */
  static async getConfig() {
    Debugger.nodeDir = await this.getNodeDirectory();
    // NOTE(review): getAuditProgram() can return null (missing account data or
    // unknown owner), which would yield "executables/null.js" here — confirm
    // whether that case should abort instead.
    let destinationPath = "executables/" + (await this.getAuditProgram()) + ".js";
    let logPath = "namespace/" + TASK_ID + "/task.log";
    console.log("Debugger.nodeDir", Debugger.nodeDir);
    return {
      webpackedFilePath: Debugger.webpackedFilePath,
      destinationPath: destinationPath,
      keywords: Debugger.keywords,
      logPath: logPath,
      nodeDir: Debugger.nodeDir,
      taskID: Debugger.taskID,
    };
  }

  /** Resolve the platform-specific KOII Desktop Node directory (cached in nodeDir). */
  static async getNodeDirectory() {
    if (Debugger.nodeDir) {
      return Debugger.nodeDir;
    }
    const homeDirectory = os.homedir();
    let nodeDirectory: string;
    switch (os.platform()) {
      case "linux":
        nodeDirectory = path.join(homeDirectory, ".config", "KOII-Desktop-Node");
        break;
      case "darwin":
        nodeDirectory = path.join(homeDirectory, "Library", "Application Support", "KOII-Desktop-Node");
        break;
      default:
        // Windows is the default
        nodeDirectory = path.join(homeDirectory, "AppData", "Roaming", "KOII-Desktop-Node");
    }
    return nodeDirectory;
  }

  /**
   * Fetch the task account from K2 and return its task_audit_program id,
   * or null if the account has no data or an unrecognized owner.
   */
  static async getAuditProgram() {
    const connection = new Connection("https://mainnet.koii.network");
    const taskId = Debugger.taskID;
    const accountInfo = await connection.getAccountInfo(new PublicKey(taskId));
    if (!accountInfo?.data) {
      console.log(`${taskId} doesn't contain any distribution list data`);
      return null;
    }
    let data;
    const owner = accountInfo.owner.toBase58();
    if (owner === "Koiitask22222222222222222222222222222222222") {
      // This owner stores task state as plain JSON.
      data = JSON.parse(accountInfo.data.toString());
    } else if (owner === "KPLTRVs6jA7QTthuJH2cEmyCEskFbSV2xpZw46cganN") {
      // This owner stores task state borsh-encoded; decode via the wasm helper
      // and convert Map fields to plain objects.
      const buffer = accountInfo.data;
      data = borsh_bpf_js_deserialize(buffer);
      data = parseTaskState(data);
    } else {
      console.error(`Not a valid Task ID ${taskId}`);
      return null;
    }
    console.log("data.task_audit_program", data.task_audit_program);
    return data.task_audit_program;
  }
}
/**
 * Convert the Map-typed bookkeeping fields of a deserialized task state
 * into plain objects (recursively), mutating and returning taskState.
 */
function parseTaskState(taskState: any) {
  const mapFields = [
    "stake_list",
    "ip_address_list",
    "distributions_audit_record",
    "distributions_audit_trigger",
    "submissions",
    "submissions_audit_trigger",
    "distribution_rewards_submission",
    "available_balances",
  ];
  for (const field of mapFields) {
    taskState[field] = objectify(taskState[field], true);
  }
  return taskState;
}
/**
 * Convert a Map to a plain object; with `recursive`, also convert nested
 * Maps (and recurse into nested non-null objects). Non-Map inputs are
 * returned unchanged.
 */
function objectify(data: any, recursive = false) {
  if (!(data instanceof Map)) {
    return data;
  }
  const plain: Record<string, any> = Object.fromEntries(data);
  if (recursive) {
    for (const [key, value] of Object.entries(plain)) {
      if (value instanceof Map || (typeof value === "object" && value !== null)) {
        plain[key] = objectify(value, true);
      }
    }
  }
  return plain;
}
export default Debugger;

62
worker/tests/e2e.py Normal file
View File

@ -0,0 +1,62 @@
"""End-to-end test for the summarizer task."""
from pathlib import Path
from prometheus_test import TestRunner
import dotenv
import argparse
import uuid
from .steps import steps
dotenv.load_dotenv()
def parse_args():
    """Parse command-line options for the e2e run.

    Returns:
        argparse.Namespace with a boolean ``reset`` attribute.
    """
    arg_parser = argparse.ArgumentParser(description="Run summarizer test sequence")
    arg_parser.add_argument(
        "--reset",
        action="store_true",
        help="Force reset of all databases before running tests",
    )
    parsed = arg_parser.parse_args()
    return parsed
def post_load_callback(db):
    """Post-load callback to process MongoDB data after JSON import"""
    # Backfills a uuid on any imported document that lacks one, so downstream
    # stages can rely on the field being present.
    # NOTE(review): reads the module-level `runner`, which main() must have
    # assigned before data load — confirm the framework guarantees that order.
    # Process todos collection
    todos = list(db.todos.find({"taskId": runner.config.task_id}))
    for todo in todos:
        if "uuid" not in todo:
            todo["uuid"] = str(uuid.uuid4())
            db.todos.replace_one({"_id": todo["_id"]}, todo)
    # Process issues collection
    issues = list(db.issues.find({"taskId": runner.config.task_id}))
    for issue in issues:
        if "uuid" not in issue:
            issue["uuid"] = str(uuid.uuid4())
            db.issues.replace_one({"_id": issue["_id"]}, issue)
# Global reference to the test runner
# (post_load_callback reads runner.config, so main() must set this before
# the runner loads data)
runner = None


def main():
    """Build the TestRunner from config.yaml and execute the step sequence."""
    global runner
    args = parse_args()

    # Create test runner with config from YAML
    base_dir = Path(__file__).parent
    runner = TestRunner(
        steps=steps,
        config_file=base_dir / "config.yaml",
        config_overrides={"post_load_callback": post_load_callback},
    )

    # Run test sequence; --reset wipes the databases first
    runner.run(force_reset=args.reset)


if __name__ == "__main__":
    main()

188
worker/tests/main.test.ts Normal file
View File

@ -0,0 +1,188 @@
import { initializeTaskManager, taskRunner } from "@_koii/task-manager";
import { setup } from "../src/task/0-setup";
import { task } from "../src/task/1-task";
import { submission } from "../src/task/2-submission";
import { audit } from "../src/task/3-audit";
import { distribution } from "../src/task/4-distribution";
import { routes } from "../src/task/5-routes";
import { namespaceWrapper, _server } from "@_koii/task-manager/namespace-wrapper";
import Joi from "joi";
import axios from "axios";
import { Submitter } from "@_koii/task-manager";
// Boot a default task environment and register the task lifecycle hooks
// (setup/task/submission/audit/distribution) plus custom routes before
// any test runs.
beforeAll(async () => {
  await namespaceWrapper.defaultTaskSetup();
  initializeTaskManager({
    setup,
    task,
    submission,
    audit,
    distribution,
    routes,
  });
});
describe("Performing the task", () => {
  // task() should compute and persist a value under the "value" key.
  it("should performs the core logic task", async () => {
    const round = 1;
    await taskRunner.task(round);
    const value = await namespaceWrapper.storeGet("value");
    expect(value).toBeDefined();
    expect(value).not.toBeNull();
  });
  // submitTask() should record at least one submission in task state, shaped
  // as { round: { stakingKey: { submission_value, slot, round } } }.
  it("should make the submission to k2 for dummy round 1", async () => {
    const round = 1;
    await taskRunner.submitTask(round);
    const taskState = await namespaceWrapper.getTaskState({});
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            submission_value: Joi.string().required(),
            slot: Joi.number().integer().required(),
            round: Joi.number().integer().required(),
          }),
        ),
      )
      .required()
      .min(1);
    const validationResult = schema.validate(taskState?.submissions);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submission doesn't exist or is incorrect");
    }
  });
  // auditTask() should populate submissions_audit_trigger with trigger records.
  it("should make an audit on submission", async () => {
    const round = 1;
    await taskRunner.auditTask(round);
    const taskState = await namespaceWrapper.getTaskState({});
    console.log("TASK STATE", taskState);
    console.log("audit task", taskState?.submissions_audit_trigger);
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            trigger_by: Joi.string().required(),
            slot: Joi.number().integer().required(),
            votes: Joi.array().required(),
          }),
        ),
      )
      .required();
    const validationResult = schema.validate(taskState?.submissions_audit_trigger);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submission audit is incorrect");
    }
  });
  // Distribution submission mirrors the submission shape, keyed per round.
  it("should make the distribution submission to k2 for dummy round 1", async () => {
    const round = 1;
    await taskRunner.submitDistributionList(round);
    const taskState = await namespaceWrapper.getTaskState({});
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            submission_value: Joi.string().required(),
            slot: Joi.number().integer().required(),
            round: Joi.number().integer().required(),
          }),
        ),
      )
      .required()
      .min(1);
    console.log("Distribution submission", taskState?.distribution_rewards_submission);
    const validationResult = schema.validate(taskState?.distribution_rewards_submission);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Distribution submission doesn't exist or is incorrect");
    }
  });
  // auditDistribution() should populate distributions_audit_trigger.
  it("should make an audit on distribution submission", async () => {
    const round = 1;
    await taskRunner.auditDistribution(round);
    const taskState = await namespaceWrapper.getTaskState({});
    console.log("audit task", taskState?.distributions_audit_trigger);
    const schema = Joi.object()
      .pattern(
        Joi.string(),
        Joi.object().pattern(
          Joi.string(),
          Joi.object({
            trigger_by: Joi.string().required(),
            slot: Joi.number().integer().required(),
            votes: Joi.array().required(),
          }),
        ),
      )
      .required();
    const validationResult = schema.validate(taskState?.distributions_audit_trigger);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Distribution audit is incorrect");
    }
  });
  // The stored distribution list must be a map of publicKey -> integer reward.
  it("should make sure the submitted distribution list is valid", async () => {
    const round = 1;
    const distributionList = await namespaceWrapper.getDistributionList("", round);
    console.log("Generated distribution List", JSON.parse(distributionList.toString()));
    const schema = Joi.object().pattern(Joi.string().required(), Joi.number().integer().required()).required();
    const validationResult = schema.validate(JSON.parse(distributionList.toString()));
    console.log(validationResult);
    try {
      expect(validationResult.error).toBeUndefined();
    } catch (e) {
      throw new Error("Submitted distribution list is not valid");
    }
  });
  // The namespace-wrapper server should answer a health-style GET on :3000.
  it("should test the endpoint", async () => {
    const response = await axios.get("http://localhost:3000");
    expect(response.status).toBe(200);
    expect(response.data).toEqual({ message: "Running", status: 200 });
  });
  // Edge case: no submitters should yield an empty distribution list.
  it("should generate a empty distribution list when submission is 0", async () => {
    const submitters: Submitter[] = [];
    const bounty = Math.floor(Math.random() * 1e15) + 1;
    const roundNumber = Math.floor(Math.random() * 1e5) + 1;
    const distributionList = await distribution(submitters, bounty, roundNumber);
    expect(distributionList).toEqual({});
  });
  // Every submitter must appear exactly once in the generated list.
  it("should generate a distribution list contains all the submitters", async () => {
    const simulatedSubmitters = 5;
    const submitters: Submitter[] = [];
    // 10k is the rough maximum number of submitters
    for (let i = 0; i < simulatedSubmitters; i++) {
      const publicKey = `mockPublicKey${i}`;
      submitters.push({
        publicKey,
        // NOTE(review): with simulatedSubmitters = 5 this yields votes in
        // [-5000, -4996]; the "10k" comment above suggests the intent may have
        // been Math.random() * 10000 - 5000 — confirm.
        votes: Math.floor(Math.random() * simulatedSubmitters) - 5000,
        stake: Math.floor(Math.random() * 1e9) + 1,
      });
    }
    const bounty = Math.floor(Math.random() * 1e15) + 1;
    const roundNumber = 1;
    const distributionList = await distribution(submitters, bounty, roundNumber);
    expect(Object.keys(distributionList).length).toBe(submitters.length);
    expect(Object.keys(distributionList).sort()).toEqual(submitters.map((submitter) => submitter.publicKey).sort());
  });
});
// Shut down the namespace-wrapper HTTP server so the test process can exit.
afterAll(async () => {
  _server.close();
});

110
worker/tests/prod-debug.ts Normal file
View File

@ -0,0 +1,110 @@
import { spawn } from "cross-spawn";
import fs from "fs";
import Debugger from "./debugger";
import { Tail } from "tail";
import path from "path";
import chalk from "chalk";
import dotenv from "dotenv";
dotenv.config();
// Entry point of the debug pipeline. Despite the log message, no file watcher
// is installed here — it runs a single build (re-run the script to rebuild).
function startWatching(): void {
  console.log("Watching for file changes...");
  // watch and trigger builds
  build();
}
/* build and webpack the task */
// Spawns `npm run webpack:test`; on success, forwards to copyWebpackedFile()
// to install the bundle into the Desktop Node folder.
function build(): void {
  console.log("Building...");
  const child = spawn("npm", ["run", "webpack:test"], {
    stdio: "inherit",
  });
  child.on("close", (code: number) => {
    if (code !== 0) {
      console.error("Build failed");
    } else {
      console.log("Build successful");
      copyWebpackedFile();
    }
    return;
  });
}
/* copy the task to the Desktop Node runtime folder */
// Resolves paths via Debugger.getConfig(), copies the webpacked bundle into
// the Desktop Node executables folder, then starts tailing the task log.
async function copyWebpackedFile(): Promise<void> {
  const debugConfig = await Debugger.getConfig();
  console.log("debugConfig", debugConfig);
  const nodeDIR = debugConfig.nodeDir;
  const sourcePath = path.join(__dirname, debugConfig.webpackedFilePath);
  const desktopNodeExecutablePath = path.join(nodeDIR, debugConfig.destinationPath);
  const desktopNodeLogPath = path.join(nodeDIR, debugConfig.logPath);
  const keywords = debugConfig.keywords;
  const taskID = debugConfig.taskID;
  // NOTE(review): path.join always returns a non-empty string, so this guard
  // appears unreachable as written — confirm whether it meant to check the
  // underlying config fields instead.
  if (!sourcePath || !desktopNodeExecutablePath) {
    console.error("Source path or destination path not specified in .env");
    return;
  }
  console.log(`Copying webpacked file from ${sourcePath} to ${desktopNodeExecutablePath}...`);
  fs.copyFile(sourcePath, desktopNodeExecutablePath, async (err) => {
    if (err) {
      console.error("Error copying file:", err);
    } else {
      console.log("File copied successfully");
      // Bundle is in place; start streaming the task log.
      tailLogs(desktopNodeLogPath, keywords, taskID);
    }
  });
}
/* tail logs */
/**
 * Tail the Desktop Node task log, echoing every line and highlighting lines
 * that contain any of the configured keywords.
 *
 * Exits the process if the task's namespace directory does not exist (the
 * task has never been run on the Desktop Node); creates an empty log file
 * if the directory exists but the log does not.
 */
async function tailLogs(desktopNodeLogPath: string, keywords: string[], taskID: string): Promise<void> {
  console.log("Watching logs for messages containing ", keywords);
  // Extract the directory path from the full log file path
  const dirPath = path.dirname(desktopNodeLogPath);
  // Check if the directory exists, create it if it doesn't
  try {
    await fs.promises.access(dirPath, fs.constants.F_OK);
  } catch (dirErr) {
    console.log(
      "Unable to find task directory. Please make sure you have the correct task ID set in your .env file, and run the task on the Desktop Node before running prod-debug.",
    );
    process.exit(1);
  }
  // Ensure the log file exists, or create it if it doesn't
  try {
    await fs.promises.access(desktopNodeLogPath, fs.constants.F_OK);
  } catch (err) {
    console.log(`Log file not found, creating ${desktopNodeLogPath}`);
    await fs.promises.writeFile(desktopNodeLogPath, "", { flag: "a" }); // 'a' flag ensures the file is created if it doesn't exist and not overwritten if it exists
  }
  const tail = new Tail(desktopNodeLogPath, {
    separator: "\n",
    flushAtEOF: true,
  });
  console.warn(
    `Now watching logs for messages containing ${keywords.join(",")}. Please start the task ${taskID} and keep it running on the Desktop Node.`,
  );
  tail.on("line", (data: string) => {
    // Highlight lines containing any configured keyword.
    if (keywords.some((keyword) => data.includes(keyword))) {
      console.log(chalk.magenta(data));
    } else {
      console.log(data);
    }
  });
  // Fix: the original line began with a stray "+" (a leaked diff marker) that
  // applied unary plus to the result of tail.on — unintended; removed.
  tail.on("error", (error: Error) => {
    console.log("ERROR: ", error);
  });
}
startWatching();

View File

@ -0,0 +1,84 @@
import { taskRunner } from "@_koii/task-manager";
import "../src/index.js";
import { namespaceWrapper } from "@_koii/task-manager/namespace-wrapper";
import { Keypair } from "@_koii/web3.js";
// CLI arguments: [numRounds] [roundDelay ms] [functionDelay ms].
const numRounds = parseInt(process.argv[2]) || 1;
const roundDelay = parseInt(process.argv[3]) || 5000;
const functionDelay = parseInt(process.argv[4]) || 1000;
// Duration samples (ms) collected per simulated round.
let TASK_TIMES: number[] = [];
let SUBMISSION_TIMES: number[] = [];
let AUDIT_TIMES: number[] = [];
/** Resolve after `ms` milliseconds. */
function sleep(ms: number) {
  return new Promise<void>((resolve) => {
    setTimeout(resolve, ms);
  });
}
/**
 * Simulate `numRounds` full task rounds (task → submit → audit →
 * distribution → distribution audit), timing the first three phases, then
 * print min/avg/max timings both in seconds and in approximate K2 slots.
 */
async function executeTasks() {
  // Stake with a throwaway keypair so submissions are accepted.
  const keypair = Keypair.generate();
  await namespaceWrapper.stakeOnChain(keypair.publicKey, keypair, keypair.publicKey, 10000);
  for (let round = 0; round < numRounds; round++) {
    const taskStartTime = Date.now();
    await taskRunner.task(round);
    const taskEndTime = Date.now();
    TASK_TIMES.push(taskEndTime - taskStartTime);
    await sleep(functionDelay);
    const taskSubmissionStartTime = Date.now();
    await taskRunner.submitTask(round);
    const taskSubmissionEndTime = Date.now();
    SUBMISSION_TIMES.push(taskSubmissionEndTime - taskSubmissionStartTime);
    await sleep(functionDelay);
    const auditStartTime = Date.now();
    await taskRunner.auditTask(round);
    const auditEndTime = Date.now();
    AUDIT_TIMES.push(auditEndTime - auditStartTime);
    await sleep(functionDelay);
    // Distribution phases are executed but not timed.
    await taskRunner.selectAndGenerateDistributionList(round);
    await sleep(functionDelay);
    await taskRunner.auditDistribution(round);
    if (round < numRounds - 1) {
      await sleep(roundDelay);
    }
  }
  console.log("TIME METRICS BELOW");
  // Summarize one phase's samples as a row for console.table.
  function metrics(name: string, times: number[]) {
    const average = (arr: number[]) => arr.reduce((a, b) => a + b, 0) / arr.length;
    const formatTime = (ms: number) => (ms / 1000).toFixed(4);
    // 408 ms per slot — presumably the K2 slot time; confirm.
    const formatSlot = (ms: number) => Math.ceil(ms / 408);
    const min = Math.min(...times);
    const max = Math.max(...times);
    const avg = average(times);
    const timeMin = formatTime(min);
    const timeMax = formatTime(max);
    const timeAvg = formatTime(avg);
    const slotMin = formatSlot(min);
    const slotMax = formatSlot(max);
    const slotAvg = formatSlot(avg);
    return {
      Metric: `SIMULATED ${name} WINDOW`,
      "Avg Time (s)": timeAvg,
      "Avg Slots": slotAvg,
      "Min Time (s)": timeMin,
      "Min Slots": slotMin,
      "Max Time (s)": timeMax,
      "Max Slots": slotMax,
    };
  }
  const timeMetrics = metrics("TASK", TASK_TIMES);
  const submissionMetrics = metrics("SUBMISSION", SUBMISSION_TIMES);
  const auditMetrics = metrics("AUDIT", AUDIT_TIMES);
  console.table([timeMetrics, submissionMetrics, auditMetrics]);
  console.log("All tasks executed. Test completed.");
  process.exit(0);
}
setTimeout(executeTasks, 1500);

View File

View File

@ -0,0 +1,51 @@
"""Test stage for auditing summary."""
import requests
from prometheus_test import Context
async def prepare(context: Context, target_name: str):
    """Prepare for auditing summary."""
    # Bundle this worker's identity with the target worker's stored submission.
    staking_key = context.env.get("WORKER_ID")
    # NOTE(review): assumes the submit stage already stored this key for
    # target_name; target_submission is None otherwise — confirm step ordering.
    target_submission = await context.storeGet(f"submission-{target_name}")
    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "target_submission": target_submission,
        "target_name": target_name,
    }
async def execute(context: Context, prepare_data: dict):
    """Execute summary audit test.

    Posts the target's submission details to the audit endpoint and stores
    the audit result under ``audit-{staking_key}-{target_name}``.
    Raises Exception on a non-200 response or an unsuccessful result.
    """
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    target_submission = prepare_data["target_submission"]
    target_name = prepare_data["target_name"]
    # Mock response for audit
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/audit",
        json={
            "taskId": context.config.task_id,
            "roundNumber": round_number,
            "stakingKey": staking_key,
            "submitterKey": target_name,
            "cid": target_submission.get("cid"),
            "prUrl": target_submission.get("pr_url"),
            "githubUsername": target_submission.get("github_username"),
        },
    )
    if response.status_code != 200:
        raise Exception(f"Failed to audit summary: {response.text}")
    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to audit summary")
    # Store audit result
    await context.storeSet(f"audit-{staking_key}-{target_name}", result.get("data"))
    return True

View File

@ -0,0 +1,39 @@
"""Test stage for fetching summarizer todo."""
import requests
from prometheus_test import Context
async def prepare(context: Context):
    """Prepare for fetching summarizer todo."""
    # Collect the worker identity and current round for the fetch request.
    staking_key = context.env.get("WORKER_ID")
    round_number = context.round_number
    return {"staking_key": staking_key, "round_number": round_number}
async def execute(context: Context, prepare_data: dict):
    """Execute fetch summarizer todo test."""
    staking_key = prepare_data["staking_key"]
    payload = {
        "stakingKey": staking_key,
        "roundNumber": prepare_data["round_number"],
    }
    # Mock response for fetching todo
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/fetch-summarizer-todo",
        json=payload,
    )
    if response.status_code != 200:
        raise Exception(f"Failed to fetch summarizer todo: {response.text}")
    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to fetch summarizer todo")
    # Store todo data for next steps
    await context.storeSet(f"todo-{staking_key}", result.get("data"))
    return True

View File

@ -0,0 +1,47 @@
"""Test stage for generating repository summary."""
import requests
from prometheus_test import Context
async def prepare(context: Context):
    """Prepare for generating summary."""
    staking_key = context.env.get("WORKER_ID")
    # NOTE(review): assumes fetch_summarizer_todo already stored this todo;
    # todo.get(...) raises AttributeError if the key is absent (None).
    todo = await context.storeGet(f"todo-{staking_key}")
    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "repo_owner": todo.get("repo_owner"),
        "repo_name": todo.get("repo_name"),
    }
async def execute(context: Context, prepare_data: dict):
    """Execute summary generation test.

    Posts the todo's repo URL to the generate-summary endpoint and stores
    the resulting PR URL under ``pr-{staking_key}``.
    Raises Exception on a non-200 response or an unsuccessful result.
    """
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    repo_owner = prepare_data["repo_owner"]
    repo_name = prepare_data["repo_name"]
    # Mock response for repo summary generation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/generate-summary",
        json={
            "taskId": context.config.task_id,
            # NOTE(review): snake_case "round_number" (stringified) differs from
            # the camelCase "roundNumber" used by the other stages — confirm the
            # endpoint expects this shape.
            "round_number": str(round_number),
            "repo_url": f"https://github.com/{repo_owner}/{repo_name}",
        },
    )
    if response.status_code != 200:
        raise Exception(f"Failed to generate summary: {response.text}")
    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to generate summary")
    # Store PR URL for next steps
    await context.storeSet(f"pr-{staking_key}", result.get("data", {}).get("pr_url"))
    return True

View File

@ -0,0 +1,56 @@
"""Test stage for submitting summary."""
import requests
from prometheus_test import Context
async def prepare(context: Context):
    """Prepare for submitting summary."""
    staking_key = context.env.get("WORKER_ID")
    # PR URL stored by the generate_summary stage; None if it didn't run.
    pr_url = await context.storeGet(f"pr-{staking_key}")
    return {
        "staking_key": staking_key,
        "round_number": context.round_number,
        "pr_url": pr_url,
        "github_username": context.env.get("GITHUB_USERNAME"),
    }
async def execute(context: Context, prepare_data: dict):
    """Execute summary submission test.

    Posts the PR URL to the submit endpoint and stores the returned CID plus
    submission details under ``submission-{staking_key}`` for the audit stage.
    Raises Exception on a non-200 response or an unsuccessful result.
    """
    staking_key = prepare_data["staking_key"]
    round_number = prepare_data["round_number"]
    pr_url = prepare_data["pr_url"]
    github_username = prepare_data["github_username"]
    # Mock response for submission
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/submit",
        json={
            "taskId": context.config.task_id,
            "roundNumber": round_number,
            "prUrl": pr_url,
            "stakingKey": staking_key,
            "githubUsername": github_username,
        },
    )
    if response.status_code != 200:
        raise Exception(f"Failed to submit summary: {response.text}")
    result = response.json()
    if not result.get("success"):
        raise Exception("Failed to submit summary")
    # Store submission data for audit
    await context.storeSet(
        f"submission-{staking_key}",
        {
            "cid": result.get("data", {}).get("cid"),
            "pr_url": pr_url,
            "github_username": github_username,
        },
    )
    return True

View File

@ -0,0 +1,31 @@
"""Test stage for validating API keys."""
import requests
from prometheus_test import Context
async def prepare(context: Context):
    """Prepare for API key validation test."""
    # The key comes from the test environment (.env).
    api_key = context.env.get("ANTHROPIC_API_KEY")
    return {"api_key": api_key}
async def execute(context: Context, prepare_data: dict):
    """Execute API key validation test."""
    # Mock response for Anthropic API validation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/validate-api-key",
        json={"api_key": prepare_data["api_key"]},
    )
    if response.status_code != 200:
        raise Exception(f"API key validation failed: {response.text}")
    if not response.json().get("valid"):
        raise Exception("API key is not valid")
    return True

View File

@ -0,0 +1,33 @@
"""Test stage for validating GitHub credentials."""
import requests
from prometheus_test import Context
async def prepare(context: Context):
    """Prepare for GitHub validation test."""
    env = context.env
    return {
        "github_username": env.get("GITHUB_USERNAME"),
        "github_token": env.get("GITHUB_TOKEN"),
    }
async def execute(context: Context, prepare_data: dict):
    """Execute GitHub validation test."""
    credentials = {
        "username": prepare_data["github_username"],
        "token": prepare_data["github_token"],
    }
    # Mock response for GitHub validation
    response = requests.post(
        "http://localhost:5000/api/builder/summarizer/validate-github",
        json=credentials,
    )
    if response.status_code != 200:
        raise Exception(f"GitHub validation failed: {response.text}")
    if not response.json().get("valid"):
        raise Exception("GitHub credentials are not valid")
    return True

85
worker/tests/steps.py Normal file
View File

@ -0,0 +1,85 @@
"""Test step definitions."""
from prometheus_test import TestStep
from functools import partial
from .stages import (
validate_api_keys,
validate_github,
fetch_summarizer_todo,
generate_summary,
submit_summary,
audit_summary,
)
# Ordered end-to-end scenario: credential checks first, then fetch →
# generate → submit per worker, then cross-audits. Ordering matters: the
# audit steps read the submissions stored by the submit steps.
steps = [
    TestStep(
        name="validate_api_keys",
        description="Validate Anthropic API key",
        prepare=validate_api_keys.prepare,
        execute=validate_api_keys.execute,
        worker="worker1",
    ),
    TestStep(
        name="validate_github",
        description="Validate GitHub credentials",
        prepare=validate_github.prepare,
        execute=validate_github.execute,
        worker="worker1",
    ),
    TestStep(
        name="fetch_todo_worker1",
        description="Fetch summarizer todo for worker1",
        prepare=fetch_summarizer_todo.prepare,
        execute=fetch_summarizer_todo.execute,
        worker="worker1",
    ),
    TestStep(
        name="fetch_todo_worker2",
        description="Fetch summarizer todo for worker2",
        prepare=fetch_summarizer_todo.prepare,
        execute=fetch_summarizer_todo.execute,
        worker="worker2",
    ),
    TestStep(
        name="generate_summary_worker1",
        description="Generate summary for worker1's todo",
        prepare=generate_summary.prepare,
        execute=generate_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="generate_summary_worker2",
        description="Generate summary for worker2's todo",
        prepare=generate_summary.prepare,
        execute=generate_summary.execute,
        worker="worker2",
    ),
    TestStep(
        name="submit_summary_worker1",
        description="Submit summary for worker1",
        prepare=submit_summary.prepare,
        execute=submit_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="submit_summary_worker2",
        description="Submit summary for worker2",
        prepare=submit_summary.prepare,
        execute=submit_summary.execute,
        worker="worker2",
    ),
    # Each worker audits the other's submission; partial() pins the target.
    TestStep(
        name="audit_worker1",
        description="Worker1 audits Worker2's submission",
        prepare=partial(audit_summary.prepare, target_name="worker2"),
        execute=audit_summary.execute,
        worker="worker1",
    ),
    TestStep(
        name="audit_worker2",
        description="Worker2 audits Worker1's submission",
        prepare=partial(audit_summary.prepare, target_name="worker1"),
        execute=audit_summary.execute,
        worker="worker2",
    ),
]

19
worker/tests/test.ts Normal file
View File

@ -0,0 +1,19 @@
// async function testSlackWebhook(){
// const slackResponse = await fetch('https://hooks.slack.com/services/', {
// method: "POST",
// headers: {
// "Content-Type": "application/json",
// },
// body: JSON.stringify({
// text: `[TASK] Error summarizing issue:\n ${JSON.stringify({
// status: "error",
// data: {
// message: "test"
// }
// })}`
// }),
// });
// console.log("[TASK] slackResponse: ", slackResponse);
// }
// testSlackWebhook();

File diff suppressed because it is too large Load Diff

225
worker/tests/wasm/bincode_js.d.ts vendored Normal file
View File

@ -0,0 +1,225 @@
/* tslint:disable */
/* eslint-disable */
/**
* @param {any} val
* @returns {any}
*/
export function bincode_js_deserialize(val: any): any;
/**
* @param {any} val
* @returns {any}
*/
export function borsh_bpf_js_deserialize(val: any): any;
/**
* Initialize Javascript logging and panic handler
*/
export function solana_program_init(): void;
/**
* A hash; the 32-byte output of a hashing algorithm.
*
* This struct is used most often in `solana-sdk` and related crates to contain
* a [SHA-256] hash, but may instead contain a [blake3] hash, as created by the
* [`blake3`] module (and used in [`Message::hash`]).
*
* [SHA-256]: https://en.wikipedia.org/wiki/SHA-2
* [blake3]: https://github.com/BLAKE3-team/BLAKE3
* [`blake3`]: crate::blake3
* [`Message::hash`]: crate::message::Message::hash
*/
export class Hash {
free(): void;
/**
* Create a new Hash object
*
* * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]`
* @param {any} value
*/
constructor(value: any);
/**
* Return the base58 string representation of the hash
* @returns {string}
*/
toString(): string;
/**
* Checks if two `Hash`s are equal
* @param {Hash} other
* @returns {boolean}
*/
equals(other: Hash): boolean;
/**
* Return the `Uint8Array` representation of the hash
* @returns {Uint8Array}
*/
toBytes(): Uint8Array;
}
/**
* A directive for a single invocation of a Solana program.
*
* An instruction specifies which program it is calling, which accounts it may
* read or modify, and additional data that serves as input to the program. One
* or more instructions are included in transactions submitted by Solana
* clients. Instructions are also used to describe [cross-program
* invocations][cpi].
*
* [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs
*
* During execution, a program will receive a list of account data as one of
* its arguments, in the same order as specified during `Instruction`
* construction.
*
* While Solana is agnostic to the format of the instruction data, it has
* built-in support for serialization via [`borsh`] and [`bincode`].
*
* [`borsh`]: https://docs.rs/borsh/latest/borsh/
* [`bincode`]: https://docs.rs/bincode/latest/bincode/
*
* # Specifying account metadata
*
* When constructing an [`Instruction`], a list of all accounts that may be
* read or written during the execution of that instruction must be supplied as
* [`AccountMeta`] values.
*
* Any account whose data may be mutated by the program during execution must
* be specified as writable. During execution, writing to an account that was
* not specified as writable will cause the transaction to fail. Writing to an
* account that is not owned by the program will cause the transaction to fail.
*
* Any account whose lamport balance may be mutated by the program during
* execution must be specified as writable. During execution, mutating the
* lamports of an account that was not specified as writable will cause the
* transaction to fail. While _subtracting_ lamports from an account not owned
* by the program will cause the transaction to fail, _adding_ lamports to any
* account is allowed, as long is it is mutable.
*
* Accounts that are not read or written by the program may still be specified
* in an `Instruction`'s account list. These will affect scheduling of program
* execution by the runtime, but will otherwise be ignored.
*
* When building a transaction, the Solana runtime coalesces all accounts used
* by all instructions in that transaction, along with accounts and permissions
* required by the runtime, into a single account list. Some accounts and
* account permissions required by the runtime to process a transaction are
* _not_ required to be included in an `Instruction`s account list. These
* include:
*
* - The program ID &mdash; it is a separate field of `Instruction`
* - The transaction's fee-paying account &mdash; it is added during [`Message`]
* construction. A program may still require the fee payer as part of the
* account list if it directly references it.
*
* [`Message`]: crate::message::Message
*
* Programs may require signatures from some accounts, in which case they
* should be specified as signers during `Instruction` construction. The
* program must still validate during execution that the account is a signer.
*/
export class Instruction {
  /**
   * Free the WASM-side memory backing this instruction (wasm-bindgen
   * destructor; maps to the raw `__wbg_instruction_free` export). The
   * instance must not be used after this call.
   */
  free(): void;
}
/**
 * A growable, ordered list of [`Instruction`] values.
 */
export class Instructions {
  /**
   * Free the WASM-side memory backing this list (wasm-bindgen destructor;
   * maps to `__wbg_instructions_free`). The instance must not be used
   * after this call.
   */
  free(): void;
  /**
   * Create a new, empty `Instructions` list.
   */
  constructor();
  /**
   * Append an instruction to the end of the list.
   * @param {Instruction} instruction
   */
  push(instruction: Instruction): void;
}
/**
 * A Solana transaction message (legacy).
 *
 * See the [`message`] module documentation for further description.
 *
 * [`message`]: crate::message
 *
 * Some constructors accept an optional `payer`, the account responsible for
 * paying the cost of executing a transaction. In most cases, callers should
 * specify the payer explicitly in these constructors. In some cases though,
 * the caller is not _required_ to specify the payer, but is still allowed to:
 * in the `Message` structure, the first account is always the fee-payer, so if
 * the caller has knowledge that the first account of the constructed
 * transaction's `Message` is both a signer and the expected fee-payer, then
 * redundantly specifying the fee-payer is not strictly required.
 */
export class Message {
  /**
   * Free the WASM-side memory backing this message (wasm-bindgen destructor;
   * maps to `__wbg_message_free`). The instance must not be used after this
   * call.
   */
  free(): void;
  /**
   * The id of a recent ledger entry.
   */
  recent_blockhash: Hash;
}
/**
 * The address of a [Solana account][acc].
 *
 * Some account addresses are [ed25519] public keys, with corresponding secret
 * keys that are managed off-chain. Often, though, account addresses do not
 * have corresponding secret keys &mdash; as with [_program derived
 * addresses_][pdas] &mdash; or the secret key is not relevant to the operation
 * of a program, and may have even been disposed of. As running Solana programs
 * can not safely create or manage secret keys, the full [`Keypair`] is not
 * defined in `solana-program` but in `solana-sdk`.
 *
 * [acc]: https://docs.solana.com/developing/programming-model/accounts
 * [ed25519]: https://ed25519.cr.yp.to/
 * [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses
 * [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html
 */
export class Pubkey {
  /**
   * Free the WASM-side memory backing this public key (wasm-bindgen
   * destructor; maps to `__wbg_pubkey_free`). The instance must not be used
   * after this call.
   */
  free(): void;
  /**
   * Create a new Pubkey object
   *
   * * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]`
   * @param {any} value
   */
  constructor(value: any);
  /**
   * Return the base58 string representation of the public key
   * @returns {string}
   */
  toString(): string;
  /**
   * Check if a `Pubkey` is on the ed25519 curve.
   * @returns {boolean}
   */
  isOnCurve(): boolean;
  /**
   * Checks if two `Pubkey`s are equal
   * @param {Pubkey} other
   * @returns {boolean}
   */
  equals(other: Pubkey): boolean;
  /**
   * Return the `Uint8Array` representation of the public key
   * @returns {Uint8Array}
   */
  toBytes(): Uint8Array;
  /**
   * Derive a Pubkey from another Pubkey, string seed, and a program id
   * @param {Pubkey} base
   * @param {string} seed
   * @param {Pubkey} owner
   * @returns {Pubkey}
   */
  static createWithSeed(base: Pubkey, seed: string, owner: Pubkey): Pubkey;
  /**
   * Derive a program address from seeds and a program id
   * @param {any[]} seeds
   * @param {Pubkey} program_id
   * @returns {Pubkey}
   */
  static createProgramAddress(seeds: any[], program_id: Pubkey): Pubkey;
  /**
   * Find a valid program address
   *
   * Returns:
   * * `[PubKey, number]` - the program address and bump seed
   * @param {any[]} seeds
   * @param {Pubkey} program_id
   * @returns {any}
   */
  static findProgramAddress(seeds: any[], program_id: Pubkey): any;
}

Binary file not shown.

View File

@ -0,0 +1,44 @@
/* tslint:disable */
/* eslint-disable */
// Raw exports of the wasm-bindgen-generated WebAssembly module.
// NOTE(review): generated file — do not edit by hand. The `number`
// parameters/returns follow the wasm-bindgen ABI (presumably pointers/handles
// into the linear `memory` below — confirm against the generated JS glue);
// application code should use the typed wrapper classes instead.
export const memory: WebAssembly.Memory;
// Bincode / borsh deserialization entry points.
export function bincode_js_deserialize(a: number): number;
export function borsh_bpf_js_deserialize(a: number): number;
// `Message`: destructor and `recent_blockhash` accessors.
export function __wbg_message_free(a: number): void;
export function __wbg_get_message_recent_blockhash(a: number): number;
export function __wbg_set_message_recent_blockhash(a: number, b: number): void;
export function solana_program_init(): void;
// System-program instruction builders (one per SystemInstruction variant).
export function systeminstruction_createAccount(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_createAccountWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number): number;
export function systeminstruction_assign(a: number, b: number): number;
export function systeminstruction_assignWithSeed(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_transfer(a: number, b: number, c: number): number;
export function systeminstruction_transferWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number): number;
export function systeminstruction_allocate(a: number, b: number): number;
export function systeminstruction_allocateWithSeed(a: number, b: number, c: number, d: number, e: number, f: number): number;
export function systeminstruction_createNonceAccount(a: number, b: number, c: number, d: number): number;
export function systeminstruction_advanceNonceAccount(a: number, b: number): number;
export function systeminstruction_withdrawNonceAccount(a: number, b: number, c: number, d: number): number;
export function systeminstruction_authorizeNonceAccount(a: number, b: number, c: number): number;
export function __wbg_instruction_free(a: number): void;
// `Pubkey` constructor and methods.
export function pubkey_constructor(a: number, b: number): void;
export function pubkey_toString(a: number, b: number): void;
export function pubkey_isOnCurve(a: number): number;
export function pubkey_equals(a: number, b: number): number;
export function pubkey_toBytes(a: number, b: number): void;
export function pubkey_createWithSeed(a: number, b: number, c: number, d: number, e: number): void;
export function pubkey_createProgramAddress(a: number, b: number, c: number, d: number): void;
export function pubkey_findProgramAddress(a: number, b: number, c: number, d: number): void;
// `Instructions` list: destructor, constructor, push.
export function __wbg_instructions_free(a: number): void;
export function instructions_constructor(): number;
export function instructions_push(a: number, b: number): void;
// `Hash` constructor and methods.
export function hash_constructor(a: number, b: number): void;
export function hash_toString(a: number, b: number): void;
export function hash_equals(a: number, b: number): number;
export function hash_toBytes(a: number, b: number): void;
export function __wbg_pubkey_free(a: number): void;
export function __wbg_hash_free(a: number): void;
// wasm-bindgen runtime helpers: allocation, stack pointer, exception store.
export function __wbindgen_malloc(a: number, b: number): number;
export function __wbindgen_realloc(a: number, b: number, c: number, d: number): number;
export function __wbindgen_add_to_stack_pointer(a: number): number;
export function __wbindgen_free(a: number, b: number, c: number): void;
export function __wbindgen_exn_store(a: number): void;

BIN
worker/tests/wasm/zstd.wasm Executable file

Binary file not shown.

View File

@ -0,0 +1,31 @@
import path from 'path'
import Dotenv from 'dotenv-webpack'
import { fileURLToPath } from "url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
export default {
mode: "development",
entry: "./src/index.ts",
output: {
filename: "main.js",
path: path.resolve(__dirname, "dist"),
libraryTarget: "commonjs2",
},
target: "node",
resolve: {
extensions: [".ts", ".js"],
},
module: {
rules: [
{
test: /\.ts$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
devtool: "source-map",
plugins: [new Dotenv()],
};

29
worker/tests/workers.json Normal file
View File

@ -0,0 +1,29 @@
{
"worker1": {
"port": 5001,
"env": {
"WORKER_ID": "worker1",
"ANTHROPIC_API_KEY": "test_key",
"GITHUB_USERNAME": "test_user",
"GITHUB_TOKEN": "test_token"
}
},
"worker2": {
"port": 5002,
"env": {
"WORKER_ID": "worker2",
"ANTHROPIC_API_KEY": "test_key",
"GITHUB_USERNAME": "test_user",
"GITHUB_TOKEN": "test_token"
}
},
"leader": {
"port": 5000,
"env": {
"WORKER_ID": "leader",
"ANTHROPIC_API_KEY": "test_key",
"GITHUB_USERNAME": "test_user",
"GITHUB_TOKEN": "test_token"
}
}
}