Merge branch origin/add-agents into main

This commit is contained in:
Alexa Amundson
2025-12-01 21:24:51 -06:00
5 changed files with 728 additions and 0 deletions

111
.github/workflows/agent-code-review.yml vendored Normal file
View File

@@ -0,0 +1,111 @@
name: "🤖 Agent: Code Review"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  code-review:
    name: Code Review Agent
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          # Full history so diff-based tooling can compare against the base branch.
          fetch-depth: 0

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            **/*.ts
            **/*.tsx
            **/*.js
            **/*.jsx
            **/*.py
            **/*.go

      - name: Analyze code quality
        if: steps.changed-files.outputs.any_changed == 'true'
        uses: actions/github-script@v7
        env:
          # Pass the file list through the environment instead of interpolating
          # it into the script source: a crafted branch/file name containing a
          # quote could otherwise inject arbitrary JavaScript into this step.
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        with:
          script: |
            const fs = require('fs');
            const changedFiles = (process.env.ALL_CHANGED_FILES || '').split(' ').filter(f => f);
            const reviewComments = [];
            let summary = '## 🤖 Code Review Agent Report\n\n';
            summary += `Analyzed **${changedFiles.length}** changed files.\n\n`;
            // Heuristic patterns. Word-boundary regexes avoid the false positives
            // a plain substring scan produced (e.g. "company" matching "any",
            // "mastodon" matching "todo"); the `any` check targets actual
            // TypeScript type annotations rather than the bare word.
            const patterns = [
              { re: /console\.log/i, severity: 'warning', msg: 'Consider removing debug logs before merging' },
              { re: /\bTODO\b/i, severity: 'info', msg: 'Found TODO comment - ensure this is tracked' },
              { re: /\bFIXME\b/i, severity: 'warning', msg: 'FIXME found - should be addressed' },
              { re: /:\s*any\b/, severity: 'warning', msg: 'Avoid using `any` type - prefer specific types' },
              { re: /eslint-disable/i, severity: 'info', msg: 'ESLint rule disabled - ensure this is intentional' },
              { re: /password/i, severity: 'error', msg: '⚠️ Possible hardcoded credential detected' },
              { re: /secret/i, severity: 'error', msg: '⚠️ Possible secret in code' },
            ];
            const issues = { error: 0, warning: 0, info: 0 };
            for (const file of changedFiles) {
              let content;
              try {
                content = fs.readFileSync(file, 'utf8');
              } catch (e) {
                // File was deleted or renamed in this PR and is absent from the checkout.
                continue;
              }
              content.split('\n').forEach((line, idx) => {
                for (const { re, severity, msg } of patterns) {
                  if (re.test(line)) {
                    issues[severity]++;
                    reviewComments.push(`- **${file}:${idx + 1}** [${severity.toUpperCase()}] ${msg}`);
                  }
                }
              });
            }
            // Build summary
            if (issues.error > 0) {
              summary += `### ❌ Errors: ${issues.error}\n`;
            }
            if (issues.warning > 0) {
              summary += `### ⚠️ Warnings: ${issues.warning}\n`;
            }
            if (issues.info > 0) {
              summary += `### Info: ${issues.info}\n`;
            }
            if (reviewComments.length > 0) {
              summary += '\n### Details\n\n';
              // Cap the comment body; GitHub rejects very large comments.
              summary += reviewComments.slice(0, 20).join('\n');
              if (reviewComments.length > 20) {
                summary += `\n\n*...and ${reviewComments.length - 20} more issues*`;
              }
            } else {
              summary += '\n✅ No issues found! Code looks good.\n';
            }
            summary += '\n\n---\n*🤖 Automated review by Code Review Agent*';
            // Post the report as a PR comment.
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: summary
            });
            // Fail the check only for error-severity findings.
            if (issues.error > 0) {
              core.setFailed(`Found ${issues.error} error(s) in code review`);
            }

View File

@@ -0,0 +1,137 @@
name: "📚 Agent: Documentation"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  documentation:
    name: Documentation Agent
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44

      - name: Analyze documentation coverage
        uses: actions/github-script@v7
        env:
          # Pass via env rather than interpolating into the script source, so a
          # crafted file name cannot inject JavaScript into this step.
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        with:
          script: |
            const fs = require('fs');
            const path = require('path');
            const changedFiles = (process.env.ALL_CHANGED_FILES || '').split(' ').filter(f => f);
            let report = '## 📚 Documentation Agent Report\n\n';
            const suggestions = [];
            const stats = {
              codeFiles: 0,
              docFiles: 0,
              hasJsdoc: 0,
              missingJsdoc: 0,
              readmeUpdated: false
            };
            // Categorize files by extension (the two sets are disjoint).
            const codeExtensions = ['.ts', '.tsx', '.js', '.jsx', '.py', '.go'];
            const docExtensions = ['.md', '.mdx', '.rst', '.txt'];
            for (const file of changedFiles) {
              const ext = path.extname(file);
              if (codeExtensions.includes(ext)) {
                stats.codeFiles++;
                try {
                  const content = fs.readFileSync(file, 'utf8');
                  // Heuristic: compare the count of exported functions against
                  // the count of JSDoc blocks in the same file.
                  const exportedFunctions = content.match(/export\s+(async\s+)?function\s+\w+/g) || [];
                  const jsdocBlocks = content.match(/\/\*\*[\s\S]*?\*\//g) || [];
                  if (exportedFunctions.length > jsdocBlocks.length) {
                    stats.missingJsdoc++;
                    suggestions.push(`📝 **${file}**: Consider adding JSDoc comments to exported functions`);
                  } else if (jsdocBlocks.length > 0) {
                    stats.hasJsdoc++;
                  }
                  // Large files with zero documentation get a dedicated nudge.
                  const lines = content.split('\n').length;
                  if (lines > 200 && jsdocBlocks.length === 0) {
                    suggestions.push(`📖 **${file}**: Large file (${lines} lines) without documentation`);
                  }
                } catch (e) {
                  // File was deleted or renamed; nothing to read.
                }
              } else if (docExtensions.includes(ext)) {
                stats.docFiles++;
              }
              if (file.toLowerCase().includes('readme')) {
                stats.readmeUpdated = true;
              }
            }
            // Build report
            report += '### 📊 Documentation Stats\n\n';
            report += `| Metric | Value |\n`;
            report += `|--------|-------|\n`;
            report += `| Code files changed | ${stats.codeFiles} |\n`;
            report += `| Doc files changed | ${stats.docFiles} |\n`;
            report += `| Files with JSDoc | ${stats.hasJsdoc} |\n`;
            report += `| Files needing docs | ${stats.missingJsdoc} |\n`;
            report += `| README updated | ${stats.readmeUpdated ? '✅' : '❌'} |\n\n`;
            // Score = share of changed code files that carry JSDoc; a PR with
            // no code files trivially scores 100.
            const docScore = stats.codeFiles > 0
              ? Math.round((stats.hasJsdoc / stats.codeFiles) * 100)
              : 100;
            report += `### 📈 Documentation Score: ${docScore}%\n\n`;
            if (docScore >= 80) {
              report += '✅ Great documentation coverage!\n\n';
            } else if (docScore >= 50) {
              report += '⚠️ Documentation could be improved.\n\n';
            } else {
              report += '❌ Documentation coverage is low. Please add docs.\n\n';
            }
            // Suggestions (capped to keep the comment readable).
            if (suggestions.length > 0) {
              report += '### 💡 Suggestions\n\n';
              suggestions.slice(0, 10).forEach(s => report += `- ${s}\n`);
              if (suggestions.length > 10) {
                report += `\n*...and ${suggestions.length - 10} more suggestions*\n`;
              }
            }
            // Tips
            if (stats.codeFiles > 0 && !stats.readmeUpdated) {
              report += '\n### 💡 Tip\n';
              report += 'Consider updating the README if this PR introduces new features or API changes.\n';
            }
            report += '\n---\n*📚 Automated review by Documentation Agent*';
            // Post the report as a PR comment.
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: report
            });

180
.github/workflows/agent-performance.yml vendored Normal file
View File

@@ -0,0 +1,180 @@
name: "⚡ Agent: Performance"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  performance:
    name: Performance Agent
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Get PR stats
        id: pr-stats
        run: |
          # Diff stats vs. the PR base; `|| echo 0` covers diffs with no
          # insertions or no deletions (grep exits non-zero on no match).
          ADDITIONS=$(git diff --shortstat origin/${{ github.base_ref }}...HEAD | grep -oP '\d+(?= insertion)' || echo 0)
          DELETIONS=$(git diff --shortstat origin/${{ github.base_ref }}...HEAD | grep -oP '\d+(?= deletion)' || echo 0)
          FILES_CHANGED=$(git diff --name-only origin/${{ github.base_ref }}...HEAD | wc -l)
          echo "additions=$ADDITIONS" >> $GITHUB_OUTPUT
          echo "deletions=$DELETIONS" >> $GITHUB_OUTPUT
          echo "files_changed=$FILES_CHANGED" >> $GITHUB_OUTPUT

      - name: Analyze bundle size impact
        id: bundle
        run: |
          # Dependency counts from package.json, when present.
          if [ -f "package.json" ]; then
            DEPS=$(cat package.json | jq '.dependencies | length // 0')
            DEV_DEPS=$(cat package.json | jq '.devDependencies | length // 0')
            echo "deps=$DEPS" >> $GITHUB_OUTPUT
            echo "dev_deps=$DEV_DEPS" >> $GITHUB_OUTPUT
          else
            echo "deps=0" >> $GITHUB_OUTPUT
            echo "dev_deps=0" >> $GITHUB_OUTPUT
          fi

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44

      - name: Performance analysis
        uses: actions/github-script@v7
        env:
          # Pass via env rather than interpolating into the script source, so a
          # crafted file name cannot inject JavaScript into this step.
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        with:
          script: |
            const fs = require('fs');
            const changedFiles = (process.env.ALL_CHANGED_FILES || '').split(' ').filter(f => f);
            const additions = parseInt('${{ steps.pr-stats.outputs.additions }}', 10) || 0;
            const deletions = parseInt('${{ steps.pr-stats.outputs.deletions }}', 10) || 0;
            const filesChanged = parseInt('${{ steps.pr-stats.outputs.files_changed }}', 10) || 0;
            const deps = parseInt('${{ steps.bundle.outputs.deps }}', 10) || 0;
            const devDeps = parseInt('${{ steps.bundle.outputs.dev_deps }}', 10) || 0;
            let report = '## ⚡ Performance Agent Report\n\n';
            const warnings = [];
            // PR Size Analysis
            report += '### 📦 PR Size Analysis\n\n';
            report += `| Metric | Value |\n`;
            report += `|--------|-------|\n`;
            report += `| Files changed | ${filesChanged} |\n`;
            report += `| Lines added | +${additions} |\n`;
            report += `| Lines removed | -${deletions} |\n`;
            report += `| Net change | ${additions - deletions > 0 ? '+' : ''}${additions - deletions} |\n`;
            report += `| Dependencies | ${deps} |\n`;
            report += `| Dev Dependencies | ${devDeps} |\n\n`;
            // PR Size Rating
            const totalChanges = additions + deletions;
            let sizeRating = '';
            if (totalChanges < 100) {
              sizeRating = '🟢 Small PR - Easy to review';
            } else if (totalChanges < 500) {
              sizeRating = '🟡 Medium PR - Moderate review effort';
            } else if (totalChanges < 1000) {
              sizeRating = '🟠 Large PR - Consider breaking down';
              warnings.push('Large PR detected. Consider splitting into smaller PRs for easier review.');
            } else {
              sizeRating = '🔴 Very Large PR - Difficult to review';
              warnings.push('Very large PR! This will be difficult to review. Strongly consider breaking into smaller PRs.');
            }
            report += `**Size Rating:** ${sizeRating}\n\n`;
            // Performance patterns check.
            // NOTE: deliberately no `g` flag — RegExp.test() with a global
            // regex is stateful (lastIndex advances between calls), so the
            // original global regexes silently missed matches when the same
            // regex object was reused across files.
            report += '### 🔍 Performance Patterns\n\n';
            const perfPatterns = [
              { pattern: /\.forEach\s*\(/, msg: 'forEach loop - consider for...of for better performance', severity: 'info' },
              { pattern: /JSON\.parse\s*\(.*JSON\.stringify/, msg: 'Deep clone via JSON - consider structuredClone()', severity: 'warning' },
              { pattern: /new\s+RegExp\s*\(/, msg: 'Dynamic RegExp creation - consider caching if used repeatedly', severity: 'info' },
              { pattern: /document\.querySelector.*loop|for.*querySelector/i, msg: 'DOM query in loop - cache selectors outside loop', severity: 'warning' },
              { pattern: /\bawait\b.*\bawait\b.*\bawait\b/, msg: 'Multiple sequential awaits - consider Promise.all()', severity: 'warning' },
              { pattern: /\.filter\(.*\)\.map\(/, msg: 'filter().map() chain - consider reduce() or single pass', severity: 'info' },
              { pattern: /useEffect.*\[\s*\]/, msg: 'Empty dependency array - ensure this is intentional', severity: 'info' },
              { pattern: /new\s+Date\(\).*loop|for.*new\s+Date/i, msg: 'Date creation in loop - cache Date object', severity: 'warning' },
            ];
            const patternFindings = [];
            for (const file of changedFiles) {
              try {
                const content = fs.readFileSync(file, 'utf8');
                for (const { pattern, msg, severity } of perfPatterns) {
                  if (pattern.test(content)) {
                    patternFindings.push({ file, msg, severity });
                  }
                }
                // Flag very large files as a maintainability warning.
                const lines = content.split('\n').length;
                if (lines > 500) {
                  warnings.push(`\`${file}\` has ${lines} lines - consider splitting into smaller modules`);
                }
              } catch (e) {
                // File was deleted or renamed; skip.
              }
            }
            if (patternFindings.length > 0) {
              patternFindings.slice(0, 10).forEach(({ file, msg, severity }) => {
                const icon = severity === 'warning' ? '⚠️' : '';
                report += `- ${icon} **${file}**: ${msg}\n`;
              });
              if (patternFindings.length > 10) {
                report += `\n*...and ${patternFindings.length - 10} more findings*\n`;
              }
            } else {
              report += '✅ No performance anti-patterns detected!\n';
            }
            report += '\n';
            // Warnings
            if (warnings.length > 0) {
              report += '### ⚠️ Warnings\n\n';
              warnings.forEach(w => report += `- ${w}\n`);
              report += '\n';
            }
            // Bundle impact estimation
            report += '### 📊 Impact Assessment\n\n';
            const pkgChanged = changedFiles.some(f => f.includes('package.json'));
            if (pkgChanged) {
              report += '⚠️ `package.json` was modified - bundle size may be affected.\n';
              report += 'Consider running bundle analysis after merging.\n\n';
            }
            // Recommendations
            report += '### 💡 Recommendations\n\n';
            if (totalChanges > 500) {
              report += '- Consider breaking this PR into smaller, focused changes\n';
            }
            if (patternFindings.some(f => f.severity === 'warning')) {
              report += '- Review the performance warnings above\n';
            }
            report += '- Run performance tests before and after merging\n';
            report += '- Monitor production metrics after deployment\n';
            report += '\n---\n*⚡ Automated analysis by Performance Agent*';
            // Post the report as a PR comment.
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: report
            });

View File

@@ -0,0 +1,127 @@
name: "🛡️ Agent: Security Audit"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write
  security-events: write

jobs:
  security-audit:
    name: Security Audit Agent
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'

      - name: Get PR diff
        id: diff
        run: |
          # Write the diff to a file; the script step reads it from disk, which
          # avoids interpolating untrusted diff content into script source.
          git diff origin/${{ github.base_ref }}...HEAD > pr_diff.txt
          echo "diff_size=$(wc -l < pr_diff.txt)" >> $GITHUB_OUTPUT

      - name: Run security patterns check
        id: security-check
        uses: actions/github-script@v7
        with:
          script: |
            const fs = require('fs');
            const diff = fs.readFileSync('pr_diff.txt', 'utf8');
            // Credential/token formats (sk-, AKIA, ghp_, PEM headers) are
            // case-sensitive by specification, so those patterns deliberately
            // omit the `i` flag — the original's /gi weakened precision and
            // invited false positives.
            const securityPatterns = [
              { pattern: /eval\s*\(/gi, severity: 'high', msg: 'Dangerous eval() usage detected' },
              { pattern: /innerHTML\s*=/gi, severity: 'medium', msg: 'innerHTML assignment - potential XSS' },
              { pattern: /document\.write/gi, severity: 'medium', msg: 'document.write usage - potential XSS' },
              { pattern: /dangerouslySetInnerHTML/gi, severity: 'medium', msg: 'React dangerouslySetInnerHTML - ensure sanitized' },
              { pattern: /exec\s*\(/gi, severity: 'high', msg: 'Shell exec detected - potential command injection' },
              { pattern: /subprocess|os\.system/gi, severity: 'high', msg: 'System command execution detected' },
              { pattern: /localStorage\.setItem.*password/gi, severity: 'high', msg: 'Storing password in localStorage' },
              { pattern: /Bearer\s+[A-Za-z0-9\-_]+\.[A-Za-z0-9\-_]+/g, severity: 'critical', msg: '⚠️ Possible JWT token in code' },
              { pattern: /sk-[A-Za-z0-9]{32,}/g, severity: 'critical', msg: '⚠️ Possible API key detected' },
              { pattern: /AKIA[0-9A-Z]{16}/g, severity: 'critical', msg: '⚠️ Possible AWS key detected' },
              { pattern: /ghp_[A-Za-z0-9]{36}/g, severity: 'critical', msg: '⚠️ Possible GitHub token detected' },
              { pattern: /-----BEGIN (RSA |EC |DSA |OPENSSH )?PRIVATE KEY-----/g, severity: 'critical', msg: '⚠️ Private key detected!' },
              { pattern: /sql.*['"]\s*\+/gi, severity: 'high', msg: 'Potential SQL injection - use parameterized queries' },
              { pattern: /password\s*[=:]\s*['"][^'"]+['"]/gi, severity: 'high', msg: 'Hardcoded password detected' },
            ];
            // `low` is reserved for future patterns; nothing populates it today.
            const findings = { critical: [], high: [], medium: [], low: [] };
            let totalIssues = 0;
            for (const { pattern, severity, msg } of securityPatterns) {
              // String.match with a /g regex returns every match (lastIndex is
              // ignored here, unlike RegExp.test).
              const matches = diff.match(pattern);
              if (matches) {
                findings[severity].push({ msg, count: matches.length });
                totalIssues += matches.length;
              }
            }
            // Build report
            let report = '## 🛡️ Security Audit Agent Report\n\n';
            if (totalIssues === 0) {
              report += '✅ **No security issues detected!**\n\n';
              report += 'The changes in this PR passed all security checks.\n';
            } else {
              report += `⚠️ **Found ${totalIssues} potential security issue(s)**\n\n`;
              if (findings.critical.length > 0) {
                report += '### 🚨 Critical\n';
                findings.critical.forEach(f => report += `- ${f.msg} (${f.count} occurrence(s))\n`);
                report += '\n';
              }
              if (findings.high.length > 0) {
                report += '### 🔴 High\n';
                findings.high.forEach(f => report += `- ${f.msg} (${f.count} occurrence(s))\n`);
                report += '\n';
              }
              if (findings.medium.length > 0) {
                report += '### 🟠 Medium\n';
                findings.medium.forEach(f => report += `- ${f.msg} (${f.count} occurrence(s))\n`);
                report += '\n';
              }
            }
            report += '\n### Checks Performed\n';
            report += '- [x] Secrets & API keys scan\n';
            report += '- [x] SQL injection patterns\n';
            report += '- [x] XSS vulnerability patterns\n';
            report += '- [x] Command injection patterns\n';
            report += '- [x] Hardcoded credentials\n';
            report += '\n---\n*🛡️ Automated audit by Security Agent*';
            // Post the report as a PR comment.
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: report
            });
            // Fail the check on critical findings only.
            if (findings.critical.length > 0) {
              core.setFailed('Critical security issues found! Please review before merging.');
            }
            return { findings, totalIssues };

      - name: Run npm audit (if package.json exists)
        continue-on-error: true
        run: |
          if [ -f "package.json" ]; then
            npm audit --audit-level=high || echo "Vulnerabilities found"
          fi

View File

@@ -0,0 +1,173 @@
name: "🧪 Agent: Test Coverage"

on:
  pull_request:
    types: [opened, synchronize, reopened]

permissions:
  contents: read
  pull-requests: write

jobs:
  test-coverage:
    name: Test Coverage Agent
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Setup Node.js
        uses: actions/setup-node@v4
        with:
          node-version: '20'
          cache: 'npm'
        # Cache setup fails when the repo has no package-lock.json; that is
        # not fatal for this analysis.
        continue-on-error: true

      - name: Get changed files
        id: changed-files
        uses: tj-actions/changed-files@v44
        with:
          files: |
            **/*.ts
            **/*.tsx
            **/*.js
            **/*.jsx

      - name: Analyze test coverage
        uses: actions/github-script@v7
        env:
          # Pass via env rather than interpolating into the script source, so a
          # crafted file name cannot inject JavaScript into this step.
          ALL_CHANGED_FILES: ${{ steps.changed-files.outputs.all_changed_files }}
        with:
          script: |
            const fs = require('fs');
            const path = require('path');
            const changedFiles = (process.env.ALL_CHANGED_FILES || '').split(' ').filter(f => f);
            let report = '## 🧪 Test Coverage Agent Report\n\n';
            const stats = {
              sourceFiles: [],
              testFiles: [],
              missingTests: [],
              hasTestFramework: false
            };
            // Detect a test framework from package.json dependencies.
            try {
              const packageJson = JSON.parse(fs.readFileSync('package.json', 'utf8'));
              const deps = { ...packageJson.dependencies, ...packageJson.devDependencies };
              stats.hasTestFramework = !!(deps.jest || deps.vitest || deps.mocha || deps['@testing-library/react']);
            } catch (e) {
              // No package.json (or unparsable) — leave hasTestFramework false.
            }
            // Split changed files into tests and sources; for each source,
            // probe the conventional sibling-test locations.
            for (const file of changedFiles) {
              const isTest = file.includes('.test.') ||
                file.includes('.spec.') ||
                file.includes('__tests__') ||
                file.includes('test/') ||
                file.includes('tests/');
              if (isTest) {
                stats.testFiles.push(file);
              } else {
                stats.sourceFiles.push(file);
                const ext = path.extname(file);
                const basename = path.basename(file, ext);
                const dirname = path.dirname(file);
                const testPatterns = [
                  `${dirname}/${basename}.test${ext}`,
                  `${dirname}/${basename}.spec${ext}`,
                  `${dirname}/__tests__/${basename}.test${ext}`,
                  `__tests__/${basename}.test${ext}`,
                ];
                const hasTest = testPatterns.some(testPath => fs.existsSync(testPath));
                // Barrel files and type declarations are exempt from the
                // missing-test list.
                if (!hasTest && !file.includes('index.') && !file.includes('.d.ts')) {
                  stats.missingTests.push(file);
                }
              }
            }
            // Share of changed source files that have a matching test file; a
            // PR with no source files trivially scores 100.
            const coveragePercent = stats.sourceFiles.length > 0
              ? Math.round(((stats.sourceFiles.length - stats.missingTests.length) / stats.sourceFiles.length) * 100)
              : 100;
            // Build report
            report += '### 📊 Test Analysis\n\n';
            report += `| Metric | Value |\n`;
            report += `|--------|-------|\n`;
            report += `| Source files changed | ${stats.sourceFiles.length} |\n`;
            report += `| Test files changed | ${stats.testFiles.length} |\n`;
            report += `| Files with tests | ${stats.sourceFiles.length - stats.missingTests.length} |\n`;
            report += `| Files missing tests | ${stats.missingTests.length} |\n`;
            report += `| Test framework | ${stats.hasTestFramework ? '✅ Detected' : '❌ Not found'} |\n\n`;
            report += `### 📈 Test Coverage Score: ${coveragePercent}%\n\n`;
            // Ten-segment progress bar.
            const filled = Math.round(coveragePercent / 10);
            const empty = 10 - filled;
            report += `\`[${'█'.repeat(filled)}${'░'.repeat(empty)}]\`\n\n`;
            if (coveragePercent >= 80) {
              report += '✅ Excellent test coverage!\n\n';
            } else if (coveragePercent >= 50) {
              report += '⚠️ Consider adding more tests for better coverage.\n\n';
            } else {
              report += '❌ Low test coverage. Please add tests for your changes.\n\n';
            }
            // Missing tests (capped to keep the comment readable).
            if (stats.missingTests.length > 0) {
              report += '### 🔍 Files Missing Tests\n\n';
              stats.missingTests.slice(0, 10).forEach(f => {
                report += `- \`${f}\`\n`;
              });
              if (stats.missingTests.length > 10) {
                report += `\n*...and ${stats.missingTests.length - 10} more files*\n`;
              }
              report += '\n';
            }
            // Recommendations
            report += '### 💡 Recommendations\n\n';
            if (!stats.hasTestFramework) {
              report += '- Consider adding a test framework (Jest, Vitest, etc.)\n';
            }
            if (stats.testFiles.length === 0 && stats.sourceFiles.length > 0) {
              report += '- No test files in this PR - consider adding tests\n';
            }
            if (stats.missingTests.length > 0) {
              report += '- Add unit tests for the files listed above\n';
            }
            if (coveragePercent === 100) {
              report += '- All changed files have corresponding tests! 🎉\n';
            }
            report += '\n---\n*🧪 Automated analysis by Test Coverage Agent*';
            // Post the report as a PR comment.
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: context.payload.pull_request.number,
              body: report
            });

      - name: Run tests (if available)
        continue-on-error: true
        run: |
          if [ -f "package.json" ]; then
            npm ci --ignore-scripts 2>/dev/null || npm install --ignore-scripts 2>/dev/null || true
            npm test 2>/dev/null || echo "No tests configured or tests failed"
          fi