chore: apply prettier

2025-05-05 22:51:21 -03:00
parent 290bebb112
commit 3f10aacb72
26 changed files with 2126 additions and 2051 deletions

View File

@ -1,26 +1,26 @@
# Earn Crypto with AI Agents: Prometheus Document & Summarize Task (Beta v0)
## Overview
The **Prometheus Document & Summarize Task** spins up an **AI agent** capable of continuously summarizing repositories, **earning you KOII**. Automated document summarization agents can constantly process and summarize information, increasing the value of the network _and_ your node. Our ultimate goal is to have **AI agents summarizing Koii tasks**, growing the network with **more opportunities for node operators to earn rewards**.
## Releases
### Beta v0
- This is the **first beta release** of the task.
- The AI agent reads documents and generates summaries automatically.
- Documentation is sent to the user's repository.
- Future versions will introduce **enhanced AI logic, more complex summarization tasks, and more!**
## Task Setup
**[How to set up a Claude API key and a GitHub API key for the 247 Document & Summarize Task.](https://www.koii.network/blog/Earn-Crypto-With-AI-Agent)**
## How It Works
1. The Koii Node **launches an AI agent** inside a lightweight runtime.
2. The agent reads an active **repository list** from the bounty repository.
3. It picks a **repository**, generates the necessary **documentation**, and submits a **GitHub pull request** (a request to have its documentation added to the repository), as sketched below.
4. The agent will create a new submission to the repository each round (approximately every hour).
5. Koii Nodes **earn rewards** for running the AI agent and contributing documentation.
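
The flow above maps onto a simple per-round loop. Below is a minimal, illustrative TypeScript sketch of that loop; the helper names (`fetchRepositoryList`, `generateDocs`, `openPullRequest`) are placeholders for this example, not the task's actual API.

```typescript
// Illustrative sketch of one round of the documentation agent (placeholder helpers).
type Repo = { owner: string; name: string };

async function runRound(round: number): Promise<void> {
  const repos = await fetchRepositoryList(); // 2. read the active repository list
  const repo = repos[round % repos.length];  // 3. pick a repository
  const branch = await generateDocs(repo);   //    generate the documentation
  await openPullRequest(repo, branch);       //    submit a GitHub pull request
}

// Hypothetical helpers, included only so the sketch is self-contained.
async function fetchRepositoryList(): Promise<Repo[]> {
  return [{ owner: "koii-network", name: "builder-247" }];
}
async function generateDocs(repo: Repo): Promise<string> {
  return `docs-for-${repo.name}`; // the real agent writes markdown summaries here
}
async function openPullRequest(repo: Repo, branch: string): Promise<void> {
  console.log(`Opened PR against ${repo.owner}/${repo.name} from branch ${branch}`);
}
```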

View File

@ -1,130 +1,130 @@
######################## ALL FIELDS ARE REQUIRED UNLESS OTHERWISE NOTED #########################
######################################### TASK METADATA #########################################
############################ Will be displayed in the desktop node ##############################
## Task Name ##
# Maximum 24 characters.
task_name: "Prometheus Docs Agent"
## Task Author ##
author: "Prometheus"
## Task Description Markdown ##
# If you specify a markdown file, the description field will be ignored.
# Markdown is recommended for better formatting.
markdownDescriptionPath: "./README.md"
## Task Description ##
# Ignored if you specify a markdown file.
description: "Task description."
## Repository URL ##
# Must be public for whitelisted tasks.
repositoryUrl: "https://github.com/koii-network/builder-247"
## Image URL ##
# 230x86 pixels.
imageUrl: "https://koii-k2-task-metadata.s3.us-east-2.amazonaws.com/Docs.png"
## Info URL ##
infoUrl: "https://www.koii.network/blog/Earn-Crypto-With-AI-Agent"
####################################### TASK CONFIGURATION ######################################
## Task Executable Network ##
# IPFS or DEVELOPMENT
# Keep this as IPFS unless you know you need to change it.
task_executable_network: "IPFS"
## Task Audit Program ##
# Task Executable Network IPFS: Path to your executable.
# Task Executable Network DEVELOPMENT: The value should be 'main'.
# Keep this as-is unless you know you need to change it.
task_audit_program: "dist/main.js"
## Round Time ##
# Duration of task, measured in slots (with each slot approximately equal to 408ms). Should be at least 800 slots.
# See https://www.koii.network/docs/concepts/what-are-tasks/what-are-tasks/gradual-consensus for more information on how round time, audit window, and submission window work.
round_time: 3000
## Audit Window ##
# The audit window should be at least 1/3 of the round time.
audit_window: 1300
## Submission Window ##
# The submission window should be at least 1/3 of the round time.
submission_window: 1300
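# Worked example (illustrative): at ~408 ms per slot, round_time 3000 ≈ 20.4 minutes per round,
# and each 1300-slot audit/submission window ≈ 8.8 minutes, above the 1/3-of-round minimum of 1000 slots.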
## Minimum Stake Amount ##
# The minimum amount of KOII or KPL that a user must stake in order to participate in the task.
minimum_stake_amount: 0.01
## Task Bounty Type ##
# KOII or KPL
task_type: "KOII"
## Token Mint Address (ONLY for KPL tasks) ##
# The Fire Token address is provided as an example.
token_type: "4qayyw53kWz6GzypcejjT1cvwMXS1qYLSMQRE8se3gTv"
## Total Bounty Amount ##
# The total bounty amount that will be available for distribution over all rounds.
# Does nothing when updating a task.
total_bounty_amount: 12000
## Bounty Amount per Round ##
# The maximum amount that can be distributed per round.
# If the actual distribution per round exceeds this amount, the distribution list will fail.
bounty_amount_per_round: 2001
## Allowed Failed Distributions ##
# Number of retries allowed for the distribution list if it fails audit.
# If all retries fail, the task will not distribute anything for the round.
# This is also the number of rounds of submissions it will keep.
allowed_failed_distributions: 8
## Space ##
# Expected Task Data Size in MBs for the account size.
# Minimums: 2 for whitelisted tasks, 1 for production, 0.1 for testing.
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#space for calculation details.
space: 5
## Requirement Tags (Optional) ##
# To add more global variables and task variables, please refer to the type, value, description format shown below.
# The ORCA_TASK addon is REQUIRED
requirementsTags:
- type: ADDON
value: "ORCA_TASK"
- type: CPU
value: "4-core"
- type: RAM
value: "5 GB"
- type: STORAGE
value: "5 GB"
- type: TASK_VARIABLE
value: "ANTHROPIC_API_KEY"
description: "Your Anthropic API key. You can get one here: https://console.anthropic.com/settings/keys"
- type: TASK_VARIABLE
value: "GITHUB_USERNAME"
description: "Your GitHub username. You can sign up for an account here: https://github.com/join"
- type: TASK_VARIABLE
value: "GITHUB_TOKEN"
description: "Your GitHub Personal Access Token. You can create one here: https://github.com/settings/tokens"
## Tags ##
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#tags for available tag options.
tags: ["AI"]
## Environment ##
# TEST or PRODUCTION
# Production mode will expose your task to all the task runners, even if not whitelisted.
environment: "TEST"
#################################### FOR UPDATING TASKS ONLY ####################################
## Old Task ID ##
task_id: "5bc74eTjGgNigupFBZXtfzAYVksPqSGBEVgRLubk7ak7"
## Migration Description ##
migrationDescription: "Log Reminder, Time Based Logic"

View File

@ -1,130 +1,130 @@
######################## ALL FIELDS ARE REQUIRED UNLESS OTHERWISE NOTED #########################
######################################### TASK METADATA #########################################
############################ Will be displayed in the desktop node ##############################
## Task Name ##
# Maximum 24 characters.
task_name: "Prometheus Docs Agent"
## Task Author ##
author: "Prometheus"
## Task Description Markdown ##
# If you specify a markdown file, the description field will be ignored.
# Markdown is recommended for better formatting.
markdownDescriptionPath: "./README.md"
## Task Description ##
# Ignored if you specify a markdown file.
description: "Task description."
## Repository URL ##
# Must be public for whitelisted tasks.
repositoryUrl: "https://github.com/koii-network/builder-247"
## Image URL ##
# 230x86 pixels.
imageUrl: "https://koii-k2-task-metadata.s3.us-east-2.amazonaws.com/Docs.png"
## Info URL ##
infoUrl: "https://www.koii.network/blog/Earn-Crypto-With-AI-Agent"
####################################### TASK CONFIGURATION ######################################
## Task Executable Network ##
# IPFS or DEVELOPMENT
# Keep this as IPFS unless you know you need to change it.
task_executable_network: "IPFS"
## Task Audit Program ##
# Task Executable Network IPFS: Path to your executable.
# Task Executable Network DEVELOPMENT: The value should be 'main'.
# Keep this as-is unless you know you need to change it.
task_audit_program: "dist/main.js"
## Round Time ##
# Duration of task, measured in slots (with each slot approximately equal to 408ms). Should be at least 800 slots.
# See https://www.koii.network/docs/concepts/what-are-tasks/what-are-tasks/gradual-consensus for more information on how round time, audit window, and submission window work.
round_time: 3000
## Audit Window ##
# The audit window should be at least 1/3 of the round time.
audit_window: 1300
## Submission Window ##
# The submission window should be at least 1/3 of the round time.
submission_window: 1300
## Minimum Stake Amount ##
# The minimum amount of KOII or KPL that a user must stake in order to participate in the task.
minimum_stake_amount: 0.01
## Task Bounty Type ##
# KOII or KPL
task_type: "KOII"
## Token Mint Address (ONLY for KPL tasks) ##
# The Fire Token address is provided as an example.
token_type: "4qayyw53kWz6GzypcejjT1cvwMXS1qYLSMQRE8se3gTv"
## Total Bounty Amount ##
# The total bounty amount that will be available for distribution over all rounds.
# Does nothing when updating a task.
total_bounty_amount: 11
## Bounty Amount per Round ##
# The maximum amount that can be distributed per round.
# If the actual distribution per round exceeds this amount, the distribution list will fail.
bounty_amount_per_round: 1
## Allowed Failed Distributions ##
# Number of retries allowed for the distribution list if it fails audit.
# If all retries fail, the task will not distribute anything for the round.
# This is also the number of rounds of submissions it will keep.
allowed_failed_distributions: 8
## Space ##
# Expected Task Data Size in MBs for the account size.
# Minimums: 2 for whitelisted tasks, 1 for production, 0.1 for testing.
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#space for calculation details.
space: 0.1
## Requirement Tags (Optional) ##
# To add more global variables and task variables, please refer to the type, value, description format shown below.
# The ORCA_TASK addon is REQUIRED
requirementsTags:
- type: ADDON
value: "ORCA_TASK"
- type: CPU
value: "4-core"
- type: RAM
value: "5 GB"
- type: STORAGE
value: "5 GB"
- type: TASK_VARIABLE
value: "ANTHROPIC_API_KEY"
description: "Your Anthropic API key. You can get one here: https://console.anthropic.com/settings/keys"
- type: TASK_VARIABLE
value: "GITHUB_USERNAME"
description: "Your GitHub username. You can sign up for an account here: https://github.com/join"
- type: TASK_VARIABLE
value: "GITHUB_TOKEN"
description: "Your GitHub Personal Access Token. You can create one here: https://github.com/settings/tokens"
## Tags ##
# See https://www.koii.network/docs/develop/command-line-tool/create-task-cli/create-task#tags for available tag options.
tags: ["AI"]
## Environment ##
# TEST or PRODUCTION
# Production mode will expose your task to all the task runners, even if not whitelisted.
environment: "TEST"
#################################### FOR UPDATING TASKS ONLY ####################################
## Old Task ID ##
task_id: "48h3f4r3AR7MdgCMkET4v3yh7PpPHuqGDWzqgH52rny1"
## Migration Description ##
migrationDescription: "Fix audit bug"

View File

@ -1,7 +1,6 @@
export default {
transform: { "^.+\\.tsx?$": "babel-jest" },
transformIgnorePatterns: ["/node_modules/(?!@babel/runtime)"],
moduleFileExtensions: ["ts", "tsx", "js", "jsx", "json", "node"],
testEnvironment: "node",
};

View File

@ -21,7 +21,6 @@ export async function task(roundNumber: number): Promise<void> {
// FORCE TO PAUSE 30 SECONDS
// No submission on Round 0 so no need to trigger fetch audit result before round 3
// Changed from 3 to 4 to have more time
// if (roundNumber >= 4) {
// const auditRound = roundNumber - 4;
// const response = await fetch(`${middleServerUrl}/summarizer/worker/update-audit-result`, {
@ -32,4 +31,4 @@ export async function task(roundNumber: number): Promise<void> {
// console.log(`[TASK] Fetched audit result for round ${auditRound}. Status: ${response.status}`);
// }
// console.log(`[TASK] EXECUTE TASK FOR ROUND ${roundNumber}`);
}
}

View File

@ -24,7 +24,7 @@ export async function submission(roundNumber: number): Promise<string | void> {
* The default implementation handles uploading the proofs to IPFS
* and returning the CID
*/
if(!await preRunCheck(roundNumber.toString())){
if (!(await preRunCheck(roundNumber.toString()))) {
return;
}
const stakingKeypair = await namespaceWrapper.getSubmitterAccount();
@ -34,14 +34,14 @@ export async function submission(roundNumber: number): Promise<string | void> {
throw new Error("No staking keypair or public key found");
}
const stakingKey = stakingKeypair.publicKey.toBase58();
const secretKey = stakingKeypair.secretKey;
console.log(`[SUBMISSION] Starting submission process for round ${roundNumber}`);
try {
const orcaClient = await initializeOrcaClient();
const shouldMakeSubmission = await namespaceWrapper.storeGet(`shouldMakeSubmission`);
if (!shouldMakeSubmission || shouldMakeSubmission !== "true") {
return;
}
@ -51,7 +51,7 @@ export async function submission(roundNumber: number): Promise<string | void> {
roundNumber,
stakingKey,
publicKey: pubKey,
secretKey
secretKey,
});
return cid || void 0;
@ -64,19 +64,19 @@ export async function submission(roundNumber: number): Promise<string | void> {
async function initializeOrcaClient() {
console.log("[SUBMISSION] Initializing Orca client...");
const orcaClient = await getOrcaClient();
if (!orcaClient) {
console.error("[SUBMISSION] Failed to initialize Orca client");
throw new Error("Failed to initialize Orca client");
}
console.log("[SUBMISSION] Orca client initialized successfully");
return orcaClient;
}
async function makeSubmission(params: SubmissionParams): Promise<string | void> {
const { orcaClient, roundNumber, stakingKey, publicKey, secretKey } = params;
const swarmBountyId = await namespaceWrapper.storeGet(`swarmBountyId`);
if (!swarmBountyId) {
console.log("[SUBMISSION] No swarm bounty id found for this round");
@ -94,35 +94,36 @@ async function makeSubmission(params: SubmissionParams): Promise<string | void>
prUrl: submissionData.prUrl,
stakingKey,
publicKey,
secretKey
secretKey,
});
const signature = await signSubmissionPayload({
taskId: TASK_ID,
roundNumber,
stakingKey,
pubKey: publicKey,
...submissionData
}, secretKey);
const signature = await signSubmissionPayload(
{
taskId: TASK_ID,
roundNumber,
stakingKey,
pubKey: publicKey,
...submissionData,
},
secretKey,
);
const cid = await storeSubmissionOnIPFS(signature);
await cleanupSubmissionState();
return cid;
}
async function fetchSubmissionData(orcaClient: any, swarmBountyId: string): Promise<SubmissionData | null> {
console.log(`[SUBMISSION] Fetching submission data for swarm bounty ${swarmBountyId}`);
const result = await orcaClient.podCall(`submission/${swarmBountyId}`);
if (!result || result.data === "No submission") {
console.log("[SUBMISSION] No existing submission found");
return null;
}
const submission = typeof result.data === 'object' && 'data' in result.data
? result.data.data
: result.data;
const submission = typeof result.data === "object" && "data" in result.data ? result.data.data : result.data;
if (!submission?.prUrl) {
throw new Error("Submission is missing PR URL");
@ -140,7 +141,7 @@ async function notifyMiddleServer(params: {
secretKey: Uint8Array<ArrayBufferLike>;
}) {
const { taskId, swarmBountyId, prUrl, stakingKey, publicKey, secretKey } = params;
const payload = {
taskId,
swarmBountyId,
@ -184,4 +185,4 @@ async function storeSubmissionOnIPFS(signature: string): Promise<string> {
async function cleanupSubmissionState(): Promise<void> {
await namespaceWrapper.storeSet(`shouldMakeSubmission`, "false");
await namespaceWrapper.storeSet(`swarmBountyId`, "");
}
}

View File

@ -82,4 +82,4 @@ export async function audit(cid: string, roundNumber: number, submitterKey: stri
// When Error---NO RETURN;
// return true;
}
}
}

View File

@ -14,7 +14,6 @@ import { middleServerUrl, status } from "../utils/constant";
//Example route
export async function routes() {
app.get("/value", async (_req, res) => {
const value = await namespaceWrapper.storeGet("value");
console.log("value", value);
@ -64,7 +63,7 @@ export async function routes() {
const message = req.body.message;
console.log("[TASK] req.body", req.body);
try {
if (!success){
if (!success) {
console.error("[TASK] Error summarizing repository:", message);
return;
}
@ -128,7 +127,6 @@ export async function routes() {
});
}
// TODO: To be completed
app.post("/failed-task", async (req, res) => {
res.status(200).json({ result: "Successfully saved task result" });

View File

@ -1,36 +1,35 @@
export function isValidAnthropicApiKey(key: string) {
const regex = /^sk-ant-[a-zA-Z0-9_-]{32,}$/;
return regex.test(key);
const regex = /^sk-ant-[a-zA-Z0-9_-]{32,}$/;
return regex.test(key);
}
export async function checkAnthropicAPIKey(apiKey: string) {
const response = await fetch('https://api.anthropic.com/v1/messages', {
method: 'POST',
const response = await fetch("https://api.anthropic.com/v1/messages", {
method: "POST",
headers: {
'x-api-key': apiKey,
'anthropic-version': '2023-06-01',
'content-type': 'application/json',
"x-api-key": apiKey,
"anthropic-version": "2023-06-01",
"content-type": "application/json",
},
body: JSON.stringify({
model: 'claude-3-opus-20240229', // or a cheaper model
model: "claude-3-opus-20240229", // or a cheaper model
max_tokens: 1, // minimal usage
messages: [{ role: 'user', content: 'Hi' }],
messages: [{ role: "user", content: "Hi" }],
}),
});
if (response.status === 200) {
console.log('✅ API key is valid and has credit.');
console.log("✅ API key is valid and has credit.");
return true;
} else {
const data = await response.json().catch(() => ({}));
if (response.status === 401) {
console.log('❌ Invalid API key.');
} else if (response.status === 403 && data.error?.message?.includes('billing')) {
console.log('❌ API key has no credit or is not authorized.');
console.log("❌ Invalid API key.");
} else if (response.status === 403 && data.error?.message?.includes("billing")) {
console.log("❌ API key has no credit or is not authorized.");
} else {
console.log('⚠️ Unexpected error:', data);
console.log("⚠️ Unexpected error:", data);
}
return false;
}
}

View File

@ -5,8 +5,8 @@ import { LogLevel } from "@_koii/namespace-wrapper/dist/types";
import { errorMessage, actionMessage, status } from "../constant";
import { checkAnthropicAPIKey } from "./anthropicCheck";
import { checkGitHub } from "./githubCheck";
export async function preRunCheck(roundNumber:string){
if (!process.env.ANTHROPIC_API_KEY) {
export async function preRunCheck(roundNumber: string) {
if (!process.env.ANTHROPIC_API_KEY) {
await namespaceWrapper.logMessage(
LogLevel.Error,
errorMessage.ANTHROPIC_API_KEY_INVALID,
@ -54,4 +54,4 @@ if (!process.env.ANTHROPIC_API_KEY) {
return false;
}
return true;
}
}

View File

@ -1,36 +1,36 @@
export async function checkGitHub(username: string, token: string) {
// 1. Check username
const userRes = await fetch(`https://api.github.com/users/${username}`);
const isUsernameValid = userRes.status === 200;
// 2. Check token
const tokenRes = await fetch('https://api.github.com/user', {
headers: {
Authorization: `token ${token}`,
},
});
const isTokenValid = tokenRes.status === 200;
const isIdentityValid = await checkGitHubIdentity(username, token);
return isIdentityValid&&isUsernameValid&&isTokenValid
// 1. Check username
const userRes = await fetch(`https://api.github.com/users/${username}`);
const isUsernameValid = userRes.status === 200;
// 2. Check token
const tokenRes = await fetch("https://api.github.com/user", {
headers: {
Authorization: `token ${token}`,
},
});
const isTokenValid = tokenRes.status === 200;
const isIdentityValid = await checkGitHubIdentity(username, token);
return isIdentityValid && isUsernameValid && isTokenValid;
}
async function checkGitHubIdentity(username: string, token: string) {
const res = await fetch('https://api.github.com/user', {
headers: {
Authorization: `token ${token}`,
Accept: 'application/vnd.github.v3+json',
},
});
if (res.status !== 200) {
return false
}
const data = await res.json();
if (data.login.toLowerCase() !== username.toLowerCase()) {
return false
}
return true
}
const res = await fetch("https://api.github.com/user", {
headers: {
Authorization: `token ${token}`,
Accept: "application/vnd.github.v3+json",
},
});
if (res.status !== 200) {
return false;
}
const data = await res.json();
if (data.login.toLowerCase() !== username.toLowerCase()) {
return false;
}
return true;
}

View File

@ -59,4 +59,4 @@ export const defaultBountyMarkdownFile =
export const customReward = 400 * 10 ** 9; // This should be in ROE!
export const middleServerUrl = "https://builder247-prod.dev.koii.network";

View File

@ -12,90 +12,90 @@ interface BountyIssue {
}
export async function getExistingIssues(): Promise<BountyIssue[]> {
try {
// read from the bounty markdown file
try {
// read from the bounty markdown file
// console.log('Fetching markdown file from:', defaultBountyMarkdownFile);
const bountyMarkdownFile = await fetch(defaultBountyMarkdownFile);
const bountyMarkdownFileText = await bountyMarkdownFile.text();
// console.log('Raw markdown content:', bountyMarkdownFileText);
const bountyMarkdownFileLines = bountyMarkdownFileText.split("\n");
// console.log('Number of lines:', bountyMarkdownFileLines.length);
const issues: BountyIssue[] = [];
let isTableStarted = false;
for (const line of bountyMarkdownFileLines) {
// Skip empty lines
if (line.trim() === '') {
// console.log('Skipping empty line');
continue;
}
// console.log('Processing line:', line);
// Skip the title line starting with #
if (line.startsWith('#')) {
// console.log('Found title line:', line);
continue;
}
// Skip the header and separator lines
if (line.startsWith('|') && line.includes('GitHub URL')) {
//console.log('Found header line');
continue;
}
if (line.startsWith('|') && line.includes('-----')) {
// console.log('Found separator line');
continue;
}
// Process table rows
if (line.startsWith('|')) {
isTableStarted = true;
// Remove first and last | and split by |
const cells = line.slice(1, -1).split('|').map(cell => cell.trim());
// console.log('Parsed cells:', cells);
// Extract GitHub URL and name from markdown link format [name](url)
const githubUrlMatch = cells[0].match(/\[(.*?)\]\((.*?)\)/);
// console.log('GitHub URL match:', githubUrlMatch);
const projectName = githubUrlMatch ? githubUrlMatch[1] : '';
const githubUrl = githubUrlMatch ? githubUrlMatch[2] : '';
const issue: BountyIssue = {
githubUrl,
projectName,
bountyTask: cells[1],
description: cells[3],
bountyAmount: cells[4],
bountyType: cells[5],
transactionHash: cells[6],
status: cells[7]
};
// console.log('Created issue object:', issue);
issues.push(issue);
}
}
// Filter all issues with status "Initialized" && Bounty Task is Document & Summarize
console.log('Final parsed issues number:', issues.length);
return issues
} catch (error) {
// console.error('Error processing markdown:', error);
throw error;
}
}
const bountyMarkdownFile = await fetch(defaultBountyMarkdownFile);
const bountyMarkdownFileText = await bountyMarkdownFile.text();
// console.log('Raw markdown content:', bountyMarkdownFileText);
const bountyMarkdownFileLines = bountyMarkdownFileText.split("\n");
// console.log('Number of lines:', bountyMarkdownFileLines.length);
const issues: BountyIssue[] = [];
let isTableStarted = false;
for (const line of bountyMarkdownFileLines) {
// Skip empty lines
if (line.trim() === "") {
// console.log('Skipping empty line');
continue;
}
// console.log('Processing line:', line);
// Skip the title line starting with #
if (line.startsWith("#")) {
// console.log('Found title line:', line);
continue;
}
// Skip the header and separator lines
if (line.startsWith("|") && line.includes("GitHub URL")) {
//console.log('Found header line');
continue;
}
if (line.startsWith("|") && line.includes("-----")) {
// console.log('Found separator line');
continue;
}
// Process table rows
if (line.startsWith("|")) {
isTableStarted = true;
// Remove first and last | and split by |
const cells = line
.slice(1, -1)
.split("|")
.map((cell) => cell.trim());
// console.log('Parsed cells:', cells);
// Extract GitHub URL and name from markdown link format [name](url)
const githubUrlMatch = cells[0].match(/\[(.*?)\]\((.*?)\)/);
// console.log('GitHub URL match:', githubUrlMatch);
const projectName = githubUrlMatch ? githubUrlMatch[1] : "";
const githubUrl = githubUrlMatch ? githubUrlMatch[2] : "";
const issue: BountyIssue = {
githubUrl,
projectName,
bountyTask: cells[1],
description: cells[3],
bountyAmount: cells[4],
bountyType: cells[5],
transactionHash: cells[6],
status: cells[7],
};
// console.log('Created issue object:', issue);
issues.push(issue);
}
}
// Filter all issues with status "Initialized" && Bounty Task is Document & Summarize
console.log("Final parsed issues number:", issues.length);
return issues;
} catch (error) {
// console.error('Error processing markdown:', error);
throw error;
}
}
export async function getInitializedDocumentSummarizeIssues(issues: BountyIssue[]) {
return issues.filter(issue => issue.status === "Initialized" && issue.bountyTask === "Document & Summarize");
return issues.filter((issue) => issue.status === "Initialized" && issue.bountyTask === "Document & Summarize");
}
// async function main(){
// const existingIssues = await getExistingIssues();
// const transactionHashs = [
@ -146,7 +146,7 @@ export async function getInitializedDocumentSummarizeIssues(issues: BountyIssue[
// if (initializedDocumentSummarizeIssues.length == 0) {
// console.log("No issues pending to be summarized");
// return;
// }
// }
// console.log("Initialized Document & Summarize issues number:", initializedDocumentSummarizeIssues.length);
// }
// async function main() {
@ -158,4 +158,4 @@ export async function getInitializedDocumentSummarizeIssues(issues: BountyIssue[
// }
// }
// main();
// main();

View File

@ -140,13 +140,13 @@ export async function getRandomNodes(roundNumber: number, numberOfNodes: number)
const lastRoundSubmissions = lastRoundSubmission.submissions;
console.log("Last round submissions:", lastRoundSubmissions);
// Get the last round number
const lastRound = Object.keys(lastRoundSubmissions).pop();
if (!lastRound) {
return [];
}
// Get the submissions for that round
const submissions = lastRoundSubmissions[lastRound];
console.log("Submissions:", submissions);
@ -156,12 +156,12 @@ export async function getRandomNodes(roundNumber: number, numberOfNodes: number)
if (availableKeys.length <= numberOfNodes) {
return availableKeys;
}
const seed = TASK_ID + roundNumber.toString() || "default" + roundNumber;
const rng = seedrandom(seed);
// Use the keys from the submissions object
const randomKeys = availableKeys.sort(() => rng() - 0.5).slice(0, numberOfNodes);
console.log("Random keys:", randomKeys);
return randomKeys;
}

View File

@ -3,38 +3,45 @@ import { getFile } from "./ipfs";
import { Submission } from "@_koii/namespace-wrapper/dist/types";
import { Submitter } from "@_koii/task-manager/dist/types/global";
import { namespaceWrapper } from "@_koii/namespace-wrapper";
export async function submissionJSONSignatureDecode({submission_value, submitterPublicKey, roundNumber}: {submission_value: string, submitterPublicKey: string, roundNumber: number}) {
let submissionString;
try {
console.log("Getting file from IPFS", submission_value);
submissionString = await getFile(submission_value);
console.log("submissionString", submissionString);
} catch (error) {
console.log("error", error);
console.error("INVALID SIGNATURE DATA");
return null;
}
// verify the signature of the submission
const submission = JSON.parse(submissionString);
console.log("submission", submission);
const signaturePayload = await namespaceWrapper.verifySignature(submission.signature, submitterPublicKey);
if (!signaturePayload.data) {
console.error("INVALID SIGNATURE");
return null;
}
const data = JSON.parse(signaturePayload.data);
console.log("signaturePayload", signaturePayload);
console.log("data", data);
if (
data.taskId !== TASK_ID ||
data.roundNumber !== roundNumber ||
data.stakingKey !== submitterPublicKey ||
!data.pubKey ||
!data.prUrl
) {
console.error("INVALID SIGNATURE DATA");
return null;
}
return data;
}
export async function submissionJSONSignatureDecode({
submission_value,
submitterPublicKey,
roundNumber,
}: {
submission_value: string;
submitterPublicKey: string;
roundNumber: number;
}) {
let submissionString;
try {
console.log("Getting file from IPFS", submission_value);
submissionString = await getFile(submission_value);
console.log("submissionString", submissionString);
} catch (error) {
console.log("error", error);
console.error("INVALID SIGNATURE DATA");
return null;
}
// verify the signature of the submission
const submission = JSON.parse(submissionString);
console.log("submission", submission);
const signaturePayload = await namespaceWrapper.verifySignature(submission.signature, submitterPublicKey);
if (!signaturePayload.data) {
console.error("INVALID SIGNATURE");
return null;
}
const data = JSON.parse(signaturePayload.data);
console.log("signaturePayload", signaturePayload);
console.log("data", data);
if (
data.taskId !== TASK_ID ||
data.roundNumber !== roundNumber ||
data.stakingKey !== submitterPublicKey ||
!data.pubKey ||
!data.prUrl
) {
console.error("INVALID SIGNATURE DATA");
return null;
}
return data;
}

View File

@ -5,105 +5,107 @@ import { actionMessage, errorMessage, middleServerUrl } from "../constant";
import { TASK_ID, namespaceWrapper } from "@_koii/namespace-wrapper";
import { LogLevel } from "@_koii/namespace-wrapper/dist/types";
export async function task(){
while (true) {
try {
let requiredWorkResponse;
const orcaClient = await getOrcaClient();
// check if the env variable is valid
const stakingKeypair = await namespaceWrapper.getSubmitterAccount()!;
const pubKey = await namespaceWrapper.getMainAccountPubkey();
if (!orcaClient || !stakingKeypair || !pubKey) {
await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.NO_ORCA_CLIENT, actionMessage.NO_ORCA_CLIENT);
// Wait for 1 minute before retrying
await new Promise(resolve => setTimeout(resolve, 60000));
continue;
}
const stakingKey = stakingKeypair.publicKey.toBase58();
export async function task() {
while (true) {
try {
let requiredWorkResponse;
const orcaClient = await getOrcaClient();
// check if the env variable is valid
const stakingKeypair = await namespaceWrapper.getSubmitterAccount()!;
const pubKey = await namespaceWrapper.getMainAccountPubkey();
if (!orcaClient || !stakingKeypair || !pubKey) {
await namespaceWrapper.logMessage(LogLevel.Error, errorMessage.NO_ORCA_CLIENT, actionMessage.NO_ORCA_CLIENT);
// Wait for 1 minute before retrying
await new Promise((resolve) => setTimeout(resolve, 60000));
continue;
}
const stakingKey = stakingKeypair.publicKey.toBase58();
/****************** All these issues need to be generate a markdown file ******************/
/****************** All these issues need to be generate a markdown file ******************/
const signature = await namespaceWrapper.payloadSigning(
{
taskId: TASK_ID,
// roundNumber: roundNumber,
action: "fetch-todo",
githubUsername: stakingKey,
stakingKey: stakingKey,
},
stakingKeypair.secretKey,
);
const retryDelay = 10000; // 10 seconds in milliseconds
const signature = await namespaceWrapper.payloadSigning(
{
taskId: TASK_ID,
// roundNumber: roundNumber,
action: "fetch-todo",
githubUsername: stakingKey,
stakingKey: stakingKey,
},
stakingKeypair.secretKey,
);
while (true) {
requiredWorkResponse = await fetch(`${middleServerUrl}/summarizer/worker/fetch-todo`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ signature: signature, stakingKey: stakingKey }),
});
const retryDelay = 10000; // 10 seconds in milliseconds
if (requiredWorkResponse.status === 200) {
break;
}
while (true) {
requiredWorkResponse = await fetch(`${middleServerUrl}/summarizer/worker/fetch-todo`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({ signature: signature, stakingKey: stakingKey }),
});
console.log(`[TASK] Server returned status ${requiredWorkResponse.status}, retrying in ${retryDelay/1000} seconds...`);
await new Promise(resolve => setTimeout(resolve, retryDelay));
}
// check if the response is 200 after all retries
if (!requiredWorkResponse || requiredWorkResponse.status !== 200) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.NO_ISSUES_PENDING_TO_BE_SUMMARIZED);
return;
}
const requiredWorkResponseData = await requiredWorkResponse.json();
console.log("[TASK] requiredWorkResponseData: ", requiredWorkResponseData);
// const uuid = uuidv4();
const alreadyAssigned = await namespaceWrapper.storeGet(JSON.stringify(requiredWorkResponseData.data.id));
if (alreadyAssigned) {
return;
}else{
await namespaceWrapper.storeSet(JSON.stringify(requiredWorkResponseData.data.id), "initialized");
}
const podcallPayload = {
taskId: TASK_ID,
};
const podCallSignature = await namespaceWrapper.payloadSigning(podcallPayload, stakingKeypair.secretKey);
const jsonBody = {
task_id: TASK_ID,
swarmBountyId: requiredWorkResponseData.data.id,
repo_url: `https://github.com/${requiredWorkResponseData.data.repo_owner}/${requiredWorkResponseData.data.repo_name}`,
podcall_signature: podCallSignature,
};
console.log("[TASK] jsonBody: ", jsonBody);
try {
const repoSummaryResponse = await orcaClient.podCall(`worker-task`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(jsonBody),
});
console.log("[TASK] repoSummaryResponse: ", repoSummaryResponse);
if (repoSummaryResponse.status !== 200) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_SUMMARIZATION_FAILED);
}
} catch (error) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_SUMMARIZATION_FAILED);
console.error("[TASK] EXECUTE TASK ERROR:", error);
}
} catch (error) {
console.error("[TASK] EXECUTE TASK ERROR:", error);
// Wait for 1 minute before retrying on error
await new Promise(resolve => setTimeout(resolve, 60000));
if (requiredWorkResponse.status === 200) {
break;
}
// Wait for 1 minute before starting the next iteration
await new Promise(resolve => setTimeout(resolve, 60000));
console.log(
`[TASK] Server returned status ${requiredWorkResponse.status}, retrying in ${retryDelay / 1000} seconds...`,
);
await new Promise((resolve) => setTimeout(resolve, retryDelay));
}
// check if the response is 200 after all retries
if (!requiredWorkResponse || requiredWorkResponse.status !== 200) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.NO_ISSUES_PENDING_TO_BE_SUMMARIZED);
return;
}
const requiredWorkResponseData = await requiredWorkResponse.json();
console.log("[TASK] requiredWorkResponseData: ", requiredWorkResponseData);
// const uuid = uuidv4();
const alreadyAssigned = await namespaceWrapper.storeGet(JSON.stringify(requiredWorkResponseData.data.id));
if (alreadyAssigned) {
return;
} else {
await namespaceWrapper.storeSet(JSON.stringify(requiredWorkResponseData.data.id), "initialized");
}
const podcallPayload = {
taskId: TASK_ID,
};
const podCallSignature = await namespaceWrapper.payloadSigning(podcallPayload, stakingKeypair.secretKey);
const jsonBody = {
task_id: TASK_ID,
swarmBountyId: requiredWorkResponseData.data.id,
repo_url: `https://github.com/${requiredWorkResponseData.data.repo_owner}/${requiredWorkResponseData.data.repo_name}`,
podcall_signature: podCallSignature,
};
console.log("[TASK] jsonBody: ", jsonBody);
try {
const repoSummaryResponse = await orcaClient.podCall(`worker-task`, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify(jsonBody),
});
console.log("[TASK] repoSummaryResponse: ", repoSummaryResponse);
if (repoSummaryResponse.status !== 200) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_SUMMARIZATION_FAILED);
}
} catch (error) {
// await namespaceWrapper.storeSet(`result-${roundNumber}`, status.ISSUE_SUMMARIZATION_FAILED);
console.error("[TASK] EXECUTE TASK ERROR:", error);
}
} catch (error) {
console.error("[TASK] EXECUTE TASK ERROR:", error);
// Wait for 1 minute before retrying on error
await new Promise((resolve) => setTimeout(resolve, 60000));
}
}
// Wait for 1 minute before starting the next iteration
await new Promise((resolve) => setTimeout(resolve, 60000));
}
}

View File

@ -19,11 +19,13 @@ tests/
## Prerequisites
1. Install the test framework:
```bash
pip install -e test-framework/
```
2. Set up environment variables in `.env`:
```
ANTHROPIC_API_KEY=your_test_key
GITHUB_USERNAME=your_test_username
@ -47,12 +49,15 @@ python -m tests.e2e --reset
## Test Flow
1. API Key Validation
- Validates Anthropic API key
2. GitHub Validation
- Validates GitHub credentials
3. Todo Management
- Fetches todos for each worker
- Generates summaries
- Submits results
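
For reference, the commands above combine into a single local run (a sketch assuming the `.env` file described in the prerequisites is populated):

```bash
# Install the local test framework in editable mode, then run the end-to-end suite
# with the --reset invocation shown earlier in this guide.
pip install -e test-framework/
python -m tests.e2e --reset
```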

View File

@ -1,12 +1,8 @@
import "dotenv/config";
export const TASK_ID =
process.env.TASK_ID || "BXbYKFdXZhQgEaMFbeShaisQBYG1FD4MiSf9gg4n6mVn";
export const WEBPACKED_FILE_PATH =
process.env.WEBPACKED_FILE_PATH || "../dist/main.js";
export const TASK_ID = process.env.TASK_ID || "BXbYKFdXZhQgEaMFbeShaisQBYG1FD4MiSf9gg4n6mVn";
export const WEBPACKED_FILE_PATH = process.env.WEBPACKED_FILE_PATH || "../dist/main.js";
const envKeywords = process.env.TEST_KEYWORDS ?? "";
export const TEST_KEYWORDS = envKeywords
? envKeywords.split(",")
: ["TEST", "EZ TESTING"];
export const TEST_KEYWORDS = envKeywords ? envKeywords.split(",") : ["TEST", "EZ TESTING"];

View File

@ -1,188 +1,188 @@
import { initializeTaskManager, taskRunner } from "@_koii/task-manager";
import { setup } from "../src/task/0-setup";
import { task } from "../src/task/1-task";
import { submission } from "../src/task/2-submission";
import { audit } from "../src/task/3-audit";
import { distribution } from "../src/task/4-distribution";
import { routes } from "../src/task/5-routes";
import { namespaceWrapper, _server } from "@_koii/task-manager/namespace-wrapper";
import Joi from "joi";
import axios from "axios";
import { Submitter } from "@_koii/task-manager";
beforeAll(async () => {
await namespaceWrapper.defaultTaskSetup();
initializeTaskManager({
setup,
task,
submission,
audit,
distribution,
routes,
});
});
describe("Performing the task", () => {
it("should performs the core logic task", async () => {
const round = 1;
await taskRunner.task(round);
const value = await namespaceWrapper.storeGet("value");
expect(value).toBeDefined();
expect(value).not.toBeNull();
});
it("should make the submission to k2 for dummy round 1", async () => {
const round = 1;
await taskRunner.submitTask(round);
const taskState = await namespaceWrapper.getTaskState({});
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
submission_value: Joi.string().required(),
slot: Joi.number().integer().required(),
round: Joi.number().integer().required(),
}),
),
)
.required()
.min(1);
const validationResult = schema.validate(taskState?.submissions);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submission doesn't exist or is incorrect");
}
});
it("should make an audit on submission", async () => {
const round = 1;
await taskRunner.auditTask(round);
const taskState = await namespaceWrapper.getTaskState({});
console.log("TASK STATE", taskState);
console.log("audit task", taskState?.submissions_audit_trigger);
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
trigger_by: Joi.string().required(),
slot: Joi.number().integer().required(),
votes: Joi.array().required(),
}),
),
)
.required();
const validationResult = schema.validate(taskState?.submissions_audit_trigger);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submission audit is incorrect");
}
});
it("should make the distribution submission to k2 for dummy round 1", async () => {
const round = 1;
await taskRunner.submitDistributionList(round);
const taskState = await namespaceWrapper.getTaskState({});
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
submission_value: Joi.string().required(),
slot: Joi.number().integer().required(),
round: Joi.number().integer().required(),
}),
),
)
.required()
.min(1);
console.log("Distribution submission", taskState?.distribution_rewards_submission);
const validationResult = schema.validate(taskState?.distribution_rewards_submission);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Distribution submission doesn't exist or is incorrect");
}
});
it("should make an audit on distribution submission", async () => {
const round = 1;
await taskRunner.auditDistribution(round);
const taskState = await namespaceWrapper.getTaskState({});
console.log("audit task", taskState?.distributions_audit_trigger);
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
trigger_by: Joi.string().required(),
slot: Joi.number().integer().required(),
votes: Joi.array().required(),
}),
),
)
.required();
const validationResult = schema.validate(taskState?.distributions_audit_trigger);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Distribution audit is incorrect");
}
});
it("should make sure the submitted distribution list is valid", async () => {
const round = 1;
const distributionList = await namespaceWrapper.getDistributionList("", round);
console.log("Generated distribution List", JSON.parse(distributionList.toString()));
const schema = Joi.object().pattern(Joi.string().required(), Joi.number().integer().required()).required();
const validationResult = schema.validate(JSON.parse(distributionList.toString()));
console.log(validationResult);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submitted distribution list is not valid");
}
});
it("should test the endpoint", async () => {
const response = await axios.get("http://localhost:3000");
expect(response.status).toBe(200);
expect(response.data).toEqual({ message: "Running", status: 200 });
});
it("should generate a empty distribution list when submission is 0", async () => {
const submitters: Submitter[] = [];
const bounty = Math.floor(Math.random() * 1e15) + 1;
const roundNumber = Math.floor(Math.random() * 1e5) + 1;
const distributionList = await distribution(submitters, bounty, roundNumber);
expect(distributionList).toEqual({});
});
it("should generate a distribution list contains all the submitters", async () => {
const simulatedSubmitters = 5;
const submitters: Submitter[] = [];
// 10k is the rough maximum number of submitters
for (let i = 0; i < simulatedSubmitters; i++) {
const publicKey = `mockPublicKey${i}`;
submitters.push({
publicKey,
votes: Math.floor(Math.random() * simulatedSubmitters) - 5000,
stake: Math.floor(Math.random() * 1e9) + 1,
});
}
const bounty = Math.floor(Math.random() * 1e15) + 1;
const roundNumber = 1;
const distributionList = await distribution(submitters, bounty, roundNumber);
expect(Object.keys(distributionList).length).toBe(submitters.length);
expect(Object.keys(distributionList).sort()).toEqual(submitters.map((submitter) => submitter.publicKey).sort());
});
});
afterAll(async () => {
_server.close();
});
import { initializeTaskManager, taskRunner } from "@_koii/task-manager";
import { setup } from "../src/task/0-setup";
import { task } from "../src/task/1-task";
import { submission } from "../src/task/2-submission";
import { audit } from "../src/task/3-audit";
import { distribution } from "../src/task/4-distribution";
import { routes } from "../src/task/5-routes";
import { namespaceWrapper, _server } from "@_koii/task-manager/namespace-wrapper";
import Joi from "joi";
import axios from "axios";
import { Submitter } from "@_koii/task-manager";
beforeAll(async () => {
await namespaceWrapper.defaultTaskSetup();
initializeTaskManager({
setup,
task,
submission,
audit,
distribution,
routes,
});
});
describe("Performing the task", () => {
it("should performs the core logic task", async () => {
const round = 1;
await taskRunner.task(round);
const value = await namespaceWrapper.storeGet("value");
expect(value).toBeDefined();
expect(value).not.toBeNull();
});
it("should make the submission to k2 for dummy round 1", async () => {
const round = 1;
await taskRunner.submitTask(round);
const taskState = await namespaceWrapper.getTaskState({});
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
submission_value: Joi.string().required(),
slot: Joi.number().integer().required(),
round: Joi.number().integer().required(),
}),
),
)
.required()
.min(1);
const validationResult = schema.validate(taskState?.submissions);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submission doesn't exist or is incorrect");
}
});
it("should make an audit on submission", async () => {
const round = 1;
await taskRunner.auditTask(round);
const taskState = await namespaceWrapper.getTaskState({});
console.log("TASK STATE", taskState);
console.log("audit task", taskState?.submissions_audit_trigger);
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
trigger_by: Joi.string().required(),
slot: Joi.number().integer().required(),
votes: Joi.array().required(),
}),
),
)
.required();
const validationResult = schema.validate(taskState?.submissions_audit_trigger);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submission audit is incorrect");
}
});
it("should make the distribution submission to k2 for dummy round 1", async () => {
const round = 1;
await taskRunner.submitDistributionList(round);
const taskState = await namespaceWrapper.getTaskState({});
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
submission_value: Joi.string().required(),
slot: Joi.number().integer().required(),
round: Joi.number().integer().required(),
}),
),
)
.required()
.min(1);
console.log("Distribution submission", taskState?.distribution_rewards_submission);
const validationResult = schema.validate(taskState?.distribution_rewards_submission);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Distribution submission doesn't exist or is incorrect");
}
});
it("should make an audit on distribution submission", async () => {
const round = 1;
await taskRunner.auditDistribution(round);
const taskState = await namespaceWrapper.getTaskState({});
console.log("audit task", taskState?.distributions_audit_trigger);
const schema = Joi.object()
.pattern(
Joi.string(),
Joi.object().pattern(
Joi.string(),
Joi.object({
trigger_by: Joi.string().required(),
slot: Joi.number().integer().required(),
votes: Joi.array().required(),
}),
),
)
.required();
const validationResult = schema.validate(taskState?.distributions_audit_trigger);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Distribution audit is incorrect");
}
});
it("should make sure the submitted distribution list is valid", async () => {
const round = 1;
const distributionList = await namespaceWrapper.getDistributionList("", round);
console.log("Generated distribution List", JSON.parse(distributionList.toString()));
const schema = Joi.object().pattern(Joi.string().required(), Joi.number().integer().required()).required();
const validationResult = schema.validate(JSON.parse(distributionList.toString()));
console.log(validationResult);
try {
expect(validationResult.error).toBeUndefined();
} catch (e) {
throw new Error("Submitted distribution list is not valid");
}
});
it("should test the endpoint", async () => {
const response = await axios.get("http://localhost:3000");
expect(response.status).toBe(200);
expect(response.data).toEqual({ message: "Running", status: 200 });
});
it("should generate a empty distribution list when submission is 0", async () => {
const submitters: Submitter[] = [];
const bounty = Math.floor(Math.random() * 1e15) + 1;
const roundNumber = Math.floor(Math.random() * 1e5) + 1;
const distributionList = await distribution(submitters, bounty, roundNumber);
expect(distributionList).toEqual({});
});
it("should generate a distribution list contains all the submitters", async () => {
const simulatedSubmitters = 5;
const submitters: Submitter[] = [];
// simulate a small set of submitters (the network supports up to roughly 10k)
for (let i = 0; i < simulatedSubmitters; i++) {
const publicKey = `mockPublicKey${i}`;
submitters.push({
publicKey,
votes: Math.floor(Math.random() * simulatedSubmitters) - 5000,
stake: Math.floor(Math.random() * 1e9) + 1,
});
}
const bounty = Math.floor(Math.random() * 1e15) + 1;
const roundNumber = 1;
const distributionList = await distribution(submitters, bounty, roundNumber);
expect(Object.keys(distributionList).length).toBe(submitters.length);
expect(Object.keys(distributionList).sort()).toEqual(submitters.map((submitter) => submitter.publicKey).sort());
});
});
afterAll(async () => {
_server.close();
});

View File

@ -1,84 +1,84 @@
import { taskRunner } from "@_koii/task-manager";
import "../src/index.js";
import { namespaceWrapper } from "@_koii/task-manager/namespace-wrapper";
import { Keypair } from "@_koii/web3.js";
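// Usage sketch (hedged): the three positional CLI arguments parsed below drive the simulation,
// e.g. `npx ts-node <this-test-file> 3 5000 1000` runs 3 rounds with a 5s pause between rounds
// and a 1s pause between phases. The exact runner (ts-node vs. the project's compiled output)
// and the file path are assumptions for illustration only.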
const numRounds = parseInt(process.argv[2]) || 1;
const roundDelay = parseInt(process.argv[3]) || 5000;
const functionDelay = parseInt(process.argv[4]) || 1000;
let TASK_TIMES: number[] = [];
let SUBMISSION_TIMES: number[] = [];
let AUDIT_TIMES: number[] = [];
function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
async function executeTasks() {
const keypair = Keypair.generate();
await namespaceWrapper.stakeOnChain(keypair.publicKey, keypair, keypair.publicKey, 10000);
for (let round = 0; round < numRounds; round++) {
const taskStartTime = Date.now();
await taskRunner.task(round);
const taskEndTime = Date.now();
TASK_TIMES.push(taskEndTime - taskStartTime);
await sleep(functionDelay);
const taskSubmissionStartTime = Date.now();
await taskRunner.submitTask(round);
const taskSubmissionEndTime = Date.now();
SUBMISSION_TIMES.push(taskSubmissionEndTime - taskSubmissionStartTime);
await sleep(functionDelay);
const auditStartTime = Date.now();
await taskRunner.auditTask(round);
const auditEndTime = Date.now();
AUDIT_TIMES.push(auditEndTime - auditStartTime);
await sleep(functionDelay);
await taskRunner.selectAndGenerateDistributionList(round);
await sleep(functionDelay);
await taskRunner.auditDistribution(round);
if (round < numRounds - 1) {
await sleep(roundDelay);
}
}
console.log("TIME METRICS BELOW");
function metrics(name: string, times: number[]) {
const average = (arr: number[]) => arr.reduce((a, b) => a + b, 0) / arr.length;
const formatTime = (ms: number) => (ms / 1000).toFixed(4);
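// Convert a duration in ms to an approximate slot count, assuming ~408 ms per slot (the divisor below).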
const formatSlot = (ms: number) => Math.ceil(ms / 408);
const min = Math.min(...times);
const max = Math.max(...times);
const avg = average(times);
const timeMin = formatTime(min);
const timeMax = formatTime(max);
const timeAvg = formatTime(avg);
const slotMin = formatSlot(min);
const slotMax = formatSlot(max);
const slotAvg = formatSlot(avg);
return {
Metric: `SIMULATED ${name} WINDOW`,
"Avg Time (s)": timeAvg,
"Avg Slots": slotAvg,
"Min Time (s)": timeMin,
"Min Slots": slotMin,
"Max Time (s)": timeMax,
"Max Slots": slotMax,
};
}
const timeMetrics = metrics("TASK", TASK_TIMES);
const submissionMetrics = metrics("SUBMISSION", SUBMISSION_TIMES);
const auditMetrics = metrics("AUDIT", AUDIT_TIMES);
console.table([timeMetrics, submissionMetrics, auditMetrics]);
console.log("All tasks executed. Test completed.");
process.exit(0);
}
setTimeout(executeTasks, 1500);
import { taskRunner } from "@_koii/task-manager";
import "../src/index.js";
import { namespaceWrapper } from "@_koii/task-manager/namespace-wrapper";
import { Keypair } from "@_koii/web3.js";
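// Usage sketch (hedged): the three positional CLI arguments parsed below drive the simulation,
// e.g. `npx ts-node <this-test-file> 3 5000 1000` runs 3 rounds with a 5s pause between rounds
// and a 1s pause between phases. The exact runner (ts-node vs. the project's compiled output)
// and the file path are assumptions for illustration only.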
const numRounds = parseInt(process.argv[2]) || 1;
const roundDelay = parseInt(process.argv[3]) || 5000;
const functionDelay = parseInt(process.argv[4]) || 1000;
let TASK_TIMES: number[] = [];
let SUBMISSION_TIMES: number[] = [];
let AUDIT_TIMES: number[] = [];
function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));
}
async function executeTasks() {
const keypair = Keypair.generate();
await namespaceWrapper.stakeOnChain(keypair.publicKey, keypair, keypair.publicKey, 10000);
for (let round = 0; round < numRounds; round++) {
const taskStartTime = Date.now();
await taskRunner.task(round);
const taskEndTime = Date.now();
TASK_TIMES.push(taskEndTime - taskStartTime);
await sleep(functionDelay);
const taskSubmissionStartTime = Date.now();
await taskRunner.submitTask(round);
const taskSubmissionEndTime = Date.now();
SUBMISSION_TIMES.push(taskSubmissionEndTime - taskSubmissionStartTime);
await sleep(functionDelay);
const auditStartTime = Date.now();
await taskRunner.auditTask(round);
const auditEndTime = Date.now();
AUDIT_TIMES.push(auditEndTime - auditStartTime);
await sleep(functionDelay);
await taskRunner.selectAndGenerateDistributionList(round);
await sleep(functionDelay);
await taskRunner.auditDistribution(round);
if (round < numRounds - 1) {
await sleep(roundDelay);
}
}
console.log("TIME METRICS BELOW");
function metrics(name: string, times: number[]) {
const average = (arr: number[]) => arr.reduce((a, b) => a + b, 0) / arr.length;
const formatTime = (ms: number) => (ms / 1000).toFixed(4);
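// Convert a duration in ms to an approximate slot count, assuming ~408 ms per slot (the divisor below).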
const formatSlot = (ms: number) => Math.ceil(ms / 408);
const min = Math.min(...times);
const max = Math.max(...times);
const avg = average(times);
const timeMin = formatTime(min);
const timeMax = formatTime(max);
const timeAvg = formatTime(avg);
const slotMin = formatSlot(min);
const slotMax = formatSlot(max);
const slotAvg = formatSlot(avg);
return {
Metric: `SIMULATED ${name} WINDOW`,
"Avg Time (s)": timeAvg,
"Avg Slots": slotAvg,
"Min Time (s)": timeMin,
"Min Slots": slotMin,
"Max Time (s)": timeMax,
"Max Slots": slotMax,
};
}
const timeMetrics = metrics("TASK", TASK_TIMES);
const submissionMetrics = metrics("SUBMISSION", SUBMISSION_TIMES);
const auditMetrics = metrics("AUDIT", AUDIT_TIMES);
console.table([timeMetrics, submissionMetrics, auditMetrics]);
console.log("All tasks executed. Test completed.");
process.exit(0);
}
setTimeout(executeTasks, 1500);

View File

@ -4,7 +4,7 @@
// headers: {
// "Content-Type": "application/json",
// },
// body: JSON.stringify({
// body: JSON.stringify({
// text: `[TASK] Error summarizing issue:\n ${JSON.stringify({
// status: "error",
// data: {
@ -16,4 +16,4 @@
// console.log("[TASK] slackResponse: ", slackResponse);
// }
// testSlackWebhook();
// testSlackWebhook();

File diff suppressed because it is too large.

View File

@ -1,225 +1,225 @@
/* tslint:disable */
/* eslint-disable */
/**
* @param {any} val
* @returns {any}
*/
* @param {any} val
* @returns {any}
*/
export function bincode_js_deserialize(val: any): any;
/**
* @param {any} val
* @returns {any}
*/
* @param {any} val
* @returns {any}
*/
export function borsh_bpf_js_deserialize(val: any): any;
/**
* Initialize Javascript logging and panic handler
*/
* Initialize Javascript logging and panic handler
*/
export function solana_program_init(): void;
/**
* A hash; the 32-byte output of a hashing algorithm.
*
* This struct is used most often in `solana-sdk` and related crates to contain
* a [SHA-256] hash, but may instead contain a [blake3] hash, as created by the
* [`blake3`] module (and used in [`Message::hash`]).
*
* [SHA-256]: https://en.wikipedia.org/wiki/SHA-2
* [blake3]: https://github.com/BLAKE3-team/BLAKE3
* [`blake3`]: crate::blake3
* [`Message::hash`]: crate::message::Message::hash
*/
* A hash; the 32-byte output of a hashing algorithm.
*
* This struct is used most often in `solana-sdk` and related crates to contain
* a [SHA-256] hash, but may instead contain a [blake3] hash, as created by the
* [`blake3`] module (and used in [`Message::hash`]).
*
* [SHA-256]: https://en.wikipedia.org/wiki/SHA-2
* [blake3]: https://github.com/BLAKE3-team/BLAKE3
* [`blake3`]: crate::blake3
* [`Message::hash`]: crate::message::Message::hash
*/
export class Hash {
free(): void;
/**
* Create a new Hash object
*
* * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]`
* @param {any} value
*/
/**
* Create a new Hash object
*
* * `value` - optional hash as a base58 encoded string, `Uint8Array`, `[number]`
* @param {any} value
*/
constructor(value: any);
/**
* Return the base58 string representation of the hash
* @returns {string}
*/
/**
* Return the base58 string representation of the hash
* @returns {string}
*/
toString(): string;
/**
* Checks if two `Hash`s are equal
* @param {Hash} other
* @returns {boolean}
*/
/**
* Checks if two `Hash`s are equal
* @param {Hash} other
* @returns {boolean}
*/
equals(other: Hash): boolean;
/**
* Return the `Uint8Array` representation of the hash
* @returns {Uint8Array}
*/
/**
* Return the `Uint8Array` representation of the hash
* @returns {Uint8Array}
*/
toBytes(): Uint8Array;
}
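// Usage sketch (illustrative only, not part of the generated declarations; assumes the wasm module
// has already been initialized via the package entry point):
//   const h = new Hash("11111111111111111111111111111111"); // base58 encoding of 32 zero bytes
//   h.toString();                    // returns the same base58 string
//   h.equals(new Hash(h.toBytes())); // true: round-trips through the Uint8Array form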
/**
* A directive for a single invocation of a Solana program.
*
* An instruction specifies which program it is calling, which accounts it may
* read or modify, and additional data that serves as input to the program. One
* or more instructions are included in transactions submitted by Solana
* clients. Instructions are also used to describe [cross-program
* invocations][cpi].
*
* [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs
*
* During execution, a program will receive a list of account data as one of
* its arguments, in the same order as specified during `Instruction`
* construction.
*
* While Solana is agnostic to the format of the instruction data, it has
* built-in support for serialization via [`borsh`] and [`bincode`].
*
* [`borsh`]: https://docs.rs/borsh/latest/borsh/
* [`bincode`]: https://docs.rs/bincode/latest/bincode/
*
* # Specifying account metadata
*
* When constructing an [`Instruction`], a list of all accounts that may be
* read or written during the execution of that instruction must be supplied as
* [`AccountMeta`] values.
*
* Any account whose data may be mutated by the program during execution must
* be specified as writable. During execution, writing to an account that was
* not specified as writable will cause the transaction to fail. Writing to an
* account that is not owned by the program will cause the transaction to fail.
*
* Any account whose lamport balance may be mutated by the program during
* execution must be specified as writable. During execution, mutating the
* lamports of an account that was not specified as writable will cause the
* transaction to fail. While _subtracting_ lamports from an account not owned
* by the program will cause the transaction to fail, _adding_ lamports to any
* account is allowed, as long is it is mutable.
*
* Accounts that are not read or written by the program may still be specified
* in an `Instruction`'s account list. These will affect scheduling of program
* execution by the runtime, but will otherwise be ignored.
*
* When building a transaction, the Solana runtime coalesces all accounts used
* by all instructions in that transaction, along with accounts and permissions
* required by the runtime, into a single account list. Some accounts and
* account permissions required by the runtime to process a transaction are
* _not_ required to be included in an `Instruction`'s account list. These
* include:
*
* - The program ID &mdash; it is a separate field of `Instruction`
* - The transaction's fee-paying account &mdash; it is added during [`Message`]
* construction. A program may still require the fee payer as part of the
* account list if it directly references it.
*
* [`Message`]: crate::message::Message
*
* Programs may require signatures from some accounts, in which case they
* should be specified as signers during `Instruction` construction. The
* program must still validate during execution that the account is a signer.
*/
* A directive for a single invocation of a Solana program.
*
* An instruction specifies which program it is calling, which accounts it may
* read or modify, and additional data that serves as input to the program. One
* or more instructions are included in transactions submitted by Solana
* clients. Instructions are also used to describe [cross-program
* invocations][cpi].
*
* [cpi]: https://docs.solana.com/developing/programming-model/calling-between-programs
*
* During execution, a program will receive a list of account data as one of
* its arguments, in the same order as specified during `Instruction`
* construction.
*
* While Solana is agnostic to the format of the instruction data, it has
* built-in support for serialization via [`borsh`] and [`bincode`].
*
* [`borsh`]: https://docs.rs/borsh/latest/borsh/
* [`bincode`]: https://docs.rs/bincode/latest/bincode/
*
* # Specifying account metadata
*
* When constructing an [`Instruction`], a list of all accounts that may be
* read or written during the execution of that instruction must be supplied as
* [`AccountMeta`] values.
*
* Any account whose data may be mutated by the program during execution must
* be specified as writable. During execution, writing to an account that was
* not specified as writable will cause the transaction to fail. Writing to an
* account that is not owned by the program will cause the transaction to fail.
*
* Any account whose lamport balance may be mutated by the program during
* execution must be specified as writable. During execution, mutating the
* lamports of an account that was not specified as writable will cause the
* transaction to fail. While _subtracting_ lamports from an account not owned
* by the program will cause the transaction to fail, _adding_ lamports to any
* account is allowed, as long as it is mutable.
*
* Accounts that are not read or written by the program may still be specified
* in an `Instruction`'s account list. These will affect scheduling of program
* execution by the runtime, but will otherwise be ignored.
*
* When building a transaction, the Solana runtime coalesces all accounts used
* by all instructions in that transaction, along with accounts and permissions
* required by the runtime, into a single account list. Some accounts and
* account permissions required by the runtime to process a transaction are
* _not_ required to be included in an `Instruction`'s account list. These
* include:
*
* - The program ID &mdash; it is a separate field of `Instruction`
* - The transaction's fee-paying account &mdash; it is added during [`Message`]
* construction. A program may still require the fee payer as part of the
* account list if it directly references it.
*
* [`Message`]: crate::message::Message
*
* Programs may require signatures from some accounts, in which case they
* should be specified as signers during `Instruction` construction. The
* program must still validate during execution that the account is a signer.
*/
export class Instruction {
free(): void;
}
/**
*/
*/
export class Instructions {
free(): void;
/**
*/
/**
*/
constructor();
/**
* @param {Instruction} instruction
*/
/**
* @param {Instruction} instruction
*/
push(instruction: Instruction): void;
}
/**
* A Solana transaction message (legacy).
*
* See the [`message`] module documentation for further description.
*
* [`message`]: crate::message
*
* Some constructors accept an optional `payer`, the account responsible for
* paying the cost of executing a transaction. In most cases, callers should
* specify the payer explicitly in these constructors. In some cases though,
* the caller is not _required_ to specify the payer, but is still allowed to:
* in the `Message` structure, the first account is always the fee-payer, so if
* the caller has knowledge that the first account of the constructed
* transaction's `Message` is both a signer and the expected fee-payer, then
* redundantly specifying the fee-payer is not strictly required.
*/
* A Solana transaction message (legacy).
*
* See the [`message`] module documentation for further description.
*
* [`message`]: crate::message
*
* Some constructors accept an optional `payer`, the account responsible for
* paying the cost of executing a transaction. In most cases, callers should
* specify the payer explicitly in these constructors. In some cases though,
* the caller is not _required_ to specify the payer, but is still allowed to:
* in the `Message` structure, the first account is always the fee-payer, so if
* the caller has knowledge that the first account of the constructed
* transaction's `Message` is both a signer and the expected fee-payer, then
* redundantly specifying the fee-payer is not strictly required.
*/
export class Message {
free(): void;
/**
* The id of a recent ledger entry.
*/
/**
* The id of a recent ledger entry.
*/
recent_blockhash: Hash;
}
/**
* The address of a [Solana account][acc].
*
* Some account addresses are [ed25519] public keys, with corresponding secret
* keys that are managed off-chain. Often, though, account addresses do not
* have corresponding secret keys &mdash; as with [_program derived
* addresses_][pdas] &mdash; or the secret key is not relevant to the operation
* of a program, and may have even been disposed of. As running Solana programs
* can not safely create or manage secret keys, the full [`Keypair`] is not
* defined in `solana-program` but in `solana-sdk`.
*
* [acc]: https://docs.solana.com/developing/programming-model/accounts
* [ed25519]: https://ed25519.cr.yp.to/
* [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses
* [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html
*/
* The address of a [Solana account][acc].
*
* Some account addresses are [ed25519] public keys, with corresponding secret
* keys that are managed off-chain. Often, though, account addresses do not
* have corresponding secret keys &mdash; as with [_program derived
* addresses_][pdas] &mdash; or the secret key is not relevant to the operation
* of a program, and may have even been disposed of. As running Solana programs
* can not safely create or manage secret keys, the full [`Keypair`] is not
* defined in `solana-program` but in `solana-sdk`.
*
* [acc]: https://docs.solana.com/developing/programming-model/accounts
* [ed25519]: https://ed25519.cr.yp.to/
* [pdas]: https://docs.solana.com/developing/programming-model/calling-between-programs#program-derived-addresses
* [`Keypair`]: https://docs.rs/solana-sdk/latest/solana_sdk/signer/keypair/struct.Keypair.html
*/
export class Pubkey {
free(): void;
/**
* Create a new Pubkey object
*
* * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]`
* @param {any} value
*/
/**
* Create a new Pubkey object
*
* * `value` - optional public key as a base58 encoded string, `Uint8Array`, `[number]`
* @param {any} value
*/
constructor(value: any);
/**
* Return the base58 string representation of the public key
* @returns {string}
*/
/**
* Return the base58 string representation of the public key
* @returns {string}
*/
toString(): string;
/**
* Check if a `Pubkey` is on the ed25519 curve.
* @returns {boolean}
*/
/**
* Check if a `Pubkey` is on the ed25519 curve.
* @returns {boolean}
*/
isOnCurve(): boolean;
/**
* Checks if two `Pubkey`s are equal
* @param {Pubkey} other
* @returns {boolean}
*/
/**
* Checks if two `Pubkey`s are equal
* @param {Pubkey} other
* @returns {boolean}
*/
equals(other: Pubkey): boolean;
/**
* Return the `Uint8Array` representation of the public key
* @returns {Uint8Array}
*/
/**
* Return the `Uint8Array` representation of the public key
* @returns {Uint8Array}
*/
toBytes(): Uint8Array;
/**
* Derive a Pubkey from another Pubkey, string seed, and a program id
* @param {Pubkey} base
* @param {string} seed
* @param {Pubkey} owner
* @returns {Pubkey}
*/
/**
* Derive a Pubkey from another Pubkey, string seed, and a program id
* @param {Pubkey} base
* @param {string} seed
* @param {Pubkey} owner
* @returns {Pubkey}
*/
static createWithSeed(base: Pubkey, seed: string, owner: Pubkey): Pubkey;
/**
* Derive a program address from seeds and a program id
* @param {any[]} seeds
* @param {Pubkey} program_id
* @returns {Pubkey}
*/
/**
* Derive a program address from seeds and a program id
* @param {any[]} seeds
* @param {Pubkey} program_id
* @returns {Pubkey}
*/
static createProgramAddress(seeds: any[], program_id: Pubkey): Pubkey;
/**
* Find a valid program address
*
* Returns:
* * `[PubKey, number]` - the program address and bump seed
* @param {any[]} seeds
* @param {Pubkey} program_id
* @returns {any}
*/
/**
* Find a valid program address
*
* Returns:
* * `[PubKey, number]` - the program address and bump seed
* @param {any[]} seeds
* @param {Pubkey} program_id
* @returns {any}
*/
static findProgramAddress(seeds: any[], program_id: Pubkey): any;
}
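// Usage sketch for address derivation (illustrative only; the seed bytes and program id are
// placeholders, and the wasm module is assumed to be initialized first):
//   const programId = new Pubkey("11111111111111111111111111111111"); // system program id
//   const withSeed = Pubkey.createWithSeed(programId, "my-seed", programId);
//   const [pda, bump] = Pubkey.findProgramAddress([new TextEncoder().encode("vault")], programId);
//   pda.isOnCurve(); // false: program-derived addresses are deliberately off the ed25519 curve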

View File

@ -8,13 +8,37 @@ export function __wbg_get_message_recent_blockhash(a: number): number;
export function __wbg_set_message_recent_blockhash(a: number, b: number): void;
export function solana_program_init(): void;
export function systeminstruction_createAccount(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_createAccountWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number, h: number): number;
export function systeminstruction_createAccountWithSeed(
a: number,
b: number,
c: number,
d: number,
e: number,
f: number,
g: number,
h: number,
): number;
export function systeminstruction_assign(a: number, b: number): number;
export function systeminstruction_assignWithSeed(a: number, b: number, c: number, d: number, e: number): number;
export function systeminstruction_transfer(a: number, b: number, c: number): number;
export function systeminstruction_transferWithSeed(a: number, b: number, c: number, d: number, e: number, f: number, g: number): number;
export function systeminstruction_transferWithSeed(
a: number,
b: number,
c: number,
d: number,
e: number,
f: number,
g: number,
): number;
export function systeminstruction_allocate(a: number, b: number): number;
export function systeminstruction_allocateWithSeed(a: number, b: number, c: number, d: number, e: number, f: number): number;
export function systeminstruction_allocateWithSeed(
a: number,
b: number,
c: number,
d: number,
e: number,
f: number,
): number;
export function systeminstruction_createNonceAccount(a: number, b: number, c: number, d: number): number;
export function systeminstruction_advanceNonceAccount(a: number, b: number): number;
export function systeminstruction_withdrawNonceAccount(a: number, b: number, c: number, d: number): number;

View File

@ -1,31 +1,31 @@
import path from 'path'
import Dotenv from 'dotenv-webpack'
import { fileURLToPath } from "url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
export default {
mode: "development",
entry: "./src/index.ts",
output: {
filename: "main.js",
path: path.resolve(__dirname, "dist"),
libraryTarget: "commonjs2",
},
target: "node",
resolve: {
extensions: [".ts", ".js"],
},
module: {
rules: [
{
test: /\.ts$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
devtool: "source-map",
plugins: [new Dotenv()],
};
import path from "path";
import Dotenv from "dotenv-webpack";
import { fileURLToPath } from "url";
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
export default {
mode: "development",
entry: "./src/index.ts",
output: {
filename: "main.js",
path: path.resolve(__dirname, "dist"),
libraryTarget: "commonjs2",
},
target: "node",
resolve: {
extensions: [".ts", ".js"],
},
module: {
rules: [
{
test: /\.ts$/,
use: "ts-loader",
exclude: /node_modules/,
},
],
},
devtool: "source-map",
plugins: [new Dotenv()],
};
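// Build sketch (assumption: this config is saved as webpack.config.js in the project root):
//   npx webpack --config webpack.config.js
// ts-loader transpiles the TypeScript entry, and Dotenv() replaces process.env.* references with
// values from a local .env file at build time, producing dist/main.js for the Node target.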

View File

@ -11,14 +11,14 @@ export default {
filename: "main.js",
path: path.resolve(__dirname, "dist"),
libraryTarget: "commonjs2",
clean: true
clean: true,
},
target: "node",
resolve: {
extensions: [".ts", ".js"]
extensions: [".ts", ".js"],
},
module: {
rules: [
{
@ -26,16 +26,14 @@ export default {
use: {
loader: "ts-loader",
options: {
transpileOnly: true
}
transpileOnly: true,
},
},
exclude: /node_modules/
}
]
exclude: /node_modules/,
},
],
},
plugins: [
],
plugins: [],
devtool: "source-map",
};