CodePrism Moth Specifications #250
name: CodePrism Moth Specifications
on:
  push:
    branches: [ main, develop, rmcp-work ]
    paths:
      - 'crates/codeprism-moth-specs/**'
      - 'crates/mandrel-mcp-th/**'
      - 'crates/codeprism-mcp-server/**'
      - '.github/workflows/codeprism-moth-specs.yml'
  pull_request:
    branches: [ main ]
    paths:
      - 'crates/codeprism-moth-specs/**'
      - 'crates/mandrel-mcp-th/**'
      - 'crates/codeprism-mcp-server/**'
  schedule:
    # Run comprehensive tests twice daily (06:00 and 18:00 UTC)
    - cron: '0 6,18 * * *'
  workflow_dispatch:
    inputs:
      stress_testing:
        description: 'Enable stress testing with large projects'
        required: false
        default: false
        type: boolean
      target_languages:
        description: 'Target languages (comma-separated: rust,python,java,javascript)'
        required: false
        default: 'rust,python,java,javascript'
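  # Manual runs can be triggered from the CLI, e.g.:
  #   gh workflow run codeprism-moth-specs.yml -f stress_testing=true -f target_languages=rust,python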
env:
  CARGO_TERM_COLOR: always
  RUST_BACKTRACE: 1
  RUST_LOG: info
concurrency:
  group: codeprism-specs-${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true
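  # A new run on the same ref cancels any in-progress run in this concurrency group.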
jobs:
  # Strategy selection for languages to test
  test-strategy:
    name: Test Strategy
    runs-on: ubuntu-latest
    timeout-minutes: 5
    outputs:
      languages: ${{ steps.strategy.outputs.languages }}
    steps:
      - name: Determine test languages
        id: strategy
        shell: bash
        run: |
          languages='["rust", "python", "java", "javascript"]'
          if [[ "${{ github.event_name }}" == "workflow_dispatch" && -n "${{ github.event.inputs.target_languages }}" ]]; then
            # Parse manual input
            IFS=',' read -ra LANGS <<< "${{ github.event.inputs.target_languages }}"
            languages='['
            for lang in "${LANGS[@]}"; do
              lang=$(echo "$lang" | xargs)  # trim whitespace
              languages="${languages}\"${lang}\","
            done
            languages="${languages%,}]"
          fi
          echo "languages=$languages" >> $GITHUB_OUTPUT
          echo "Testing languages: $languages"
  # Matrix testing of CodePrism specifications
  codeprism-comprehensive:
    name: CodePrism ${{ matrix.language }}
    runs-on: ubuntu-latest
    needs: [test-strategy]
    timeout-minutes: 45
    strategy:
      matrix:
        language: ${{ fromJson(needs.test-strategy.outputs.languages) }}
      fail-fast: false
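      # fail-fast: false lets the remaining language jobs finish even if one language fails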
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Setup Rust environment
        uses: ./.github/actions/setup-rust
        with:
          toolchain: stable
          cache-key: codeprism-specs-${{ matrix.language }}
      - name: Install language-specific dependencies
        shell: bash
        run: |
          echo "=== Installing Dependencies for ${{ matrix.language }} ==="
          sudo apt-get update
          sudo apt-get install -y build-essential pkg-config python3 python3-pip jq time
          case "${{ matrix.language }}" in
            "java")
              sudo apt-get install -y openjdk-11-jdk maven
              echo "Java version: $(java -version 2>&1 | head -1)"
              ;;
            "javascript")
              curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash -
              sudo apt-get install -y nodejs
              echo "Node.js version: $(node --version)"
              echo "npm version: $(npm --version)"
              ;;
            "python")
              pip3 install psutil matplotlib pandas numpy
              echo "Python version: $(python3 --version)"
              ;;
          esac
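          # Note: openjdk-11-jdk may not be packaged on newer ubuntu-latest images; adjust the JDK package if the apt install fails.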
      - name: Verify CodePrism specification
        shell: bash
        run: |
          SPEC_FILE="crates/codeprism-moth-specs/codeprism/comprehensive/codeprism-${{ matrix.language }}-comprehensive.yaml"
          if [[ ! -f "$SPEC_FILE" ]]; then
            echo "❌ CodePrism ${{ matrix.language }} specification not found: $SPEC_FILE"
            exit 1
          fi
          echo "✅ CodePrism ${{ matrix.language }} specification found: $SPEC_FILE"
          # Validate YAML syntax
          python3 -c "
          import yaml
          import sys
          try:
              with open('$SPEC_FILE', 'r') as f:
                  yaml.safe_load(f)
              print('✅ YAML syntax valid')
          except Exception as e:
              print(f'❌ YAML syntax error: {e}')
              sys.exit(1)
          "
      - name: Build optimized components
        shell: bash
        run: |
          echo "=== Building Optimized Components ==="
          # Build with native optimizations
          RUSTFLAGS="-C target-cpu=native" cargo build --release \
            --package mandrel-mcp-th \
            --package codeprism-mcp-server \
            --bin moth \
            --bin codeprism
          # Verify binaries
          if [[ ! -f "target/release/moth" ]] || [[ ! -f "target/release/codeprism" ]]; then
            echo "❌ Required binaries not found after build"
            exit 1
          fi
          echo "✅ Components built successfully"
          ./target/release/moth --version
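          # target-cpu=native tunes codegen to this runner's CPU; the binaries are only used within this job, so portability is not a concern.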
      - name: Prepare CodePrism test environment
        shell: bash
        run: |
          echo "=== Preparing CodePrism Test Environment ==="
          # Verify test specifications exist
          SPEC_FILE="crates/codeprism-moth-specs/codeprism/comprehensive/codeprism-${{ matrix.language }}-comprehensive.yaml"
          if [[ ! -f "$SPEC_FILE" ]]; then
            echo "❌ Test specification not found: $SPEC_FILE"
            exit 1
          fi
          echo "✅ Test specification found: $SPEC_FILE"
          echo "✅ Test environment ready - moth will manage MCP server lifecycle"
      - name: Execute comprehensive tests
        id: comprehensive-tests
        shell: bash
        run: |
          echo "=== Executing CodePrism ${{ matrix.language }} Tests ==="
          SPEC_FILE="crates/codeprism-moth-specs/codeprism/comprehensive/codeprism-${{ matrix.language }}-comprehensive.yaml"
          OUTPUT_FILE="results-${{ matrix.language }}.json"
          # Configure test parameters
          timeout_mins=40
          [[ "${{ github.event.inputs.stress_testing }}" == "true" ]] && timeout_mins=90
          START_TIME=$(date +%s)
          # Execute comprehensive test suite
          timeout ${timeout_mins}m ./target/release/moth run \
            "$SPEC_FILE" \
            --output "./reports" \
            ${{ github.event.inputs.stress_testing == 'true' && '--verbose' || '' }}
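          # GNU timeout exits with status 124 when the limit is hit, which fails this step.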
          END_TIME=$(date +%s)
          DURATION=$((END_TIME - START_TIME))
          echo "DURATION=$DURATION" >> $GITHUB_OUTPUT
          # Verify and report results
          if [[ -d "./reports" ]] && [[ -n "$(ls -A ./reports 2>/dev/null)" ]]; then
            echo "✅ Test execution completed in ${DURATION}s"
            # Look for generated reports
            echo "📊 Generated reports:"
            ls -la ./reports/ || echo "No reports directory found"
            # Try to find and summarize results
            for report in ./reports/*.json; do
              if [[ -f "$report" ]]; then
                echo "Found report: $report"
                if command -v jq >/dev/null 2>&1; then
                  jq -r '.summary // "No summary available"' "$report" || echo "Could not parse $report"
                fi
              fi
            done
          else
            echo "⚠️ No test reports generated - this may indicate test completion without detailed results"
            echo "Test execution completed in ${DURATION}s"
          fi
      - name: Analyze performance metrics
        if: always()
        shell: bash
        run: |
          echo "=== Performance Analysis ==="
          # moth writes its reports under ./reports; analyze the first JSON report if one exists
          OUTPUT_FILE=$(ls ./reports/*.json 2>/dev/null | head -1 || true)
          DURATION="${{ steps.comprehensive-tests.outputs.DURATION }}"
          if [[ -f "$OUTPUT_FILE" && -n "$DURATION" ]]; then
            echo "🕒 Execution Time: ${DURATION}s"
            if command -v jq >/dev/null 2>&1; then
              # Extract performance metrics if available
              avg_latency=$(jq -r '.performance.average_latency_ms // "N/A"' "$OUTPUT_FILE")
              max_memory=$(jq -r '.performance.max_memory_mb // "N/A"' "$OUTPUT_FILE")
              echo "📈 Performance Metrics:"
              echo "  Average Latency: ${avg_latency}ms"
              echo "  Peak Memory: ${max_memory}MB"
              echo "  Total Duration: ${DURATION}s"
            fi
          fi
          # Performance threshold (warn if exceeded)
          if [[ -n "$DURATION" && "$DURATION" -gt 2400 ]]; then  # 40 minutes
            echo "⚠️ Warning: Test execution exceeded expected duration (${DURATION}s > 2400s)"
          fi
      - name: Upload test results
        if: always()
        uses: actions/upload-artifact@v4
        with:
          name: codeprism-${{ matrix.language }}-results
          path: |
            reports/
            results-${{ matrix.language }}.json
            server.log
          retention-days: 7
      - name: Cleanup server
        if: always()
        shell: bash
        run: |
          # Defensive cleanup only: moth manages the MCP server lifecycle itself and this
          # workflow defines no `server` step, so SERVER_PID is normally empty and this is a no-op.
          SERVER_PID="${{ steps.server.outputs.SERVER_PID }}"
          if [[ -n "$SERVER_PID" ]] && kill -0 "$SERVER_PID" 2>/dev/null; then
            echo "Stopping CodePrism MCP server (PID: $SERVER_PID)"
            kill "$SERVER_PID"
            sleep 2
            kill -9 "$SERVER_PID" 2>/dev/null || true
          fi
  # Aggregate results and summary
  test-summary:
    name: Test Summary
    runs-on: ubuntu-latest
    needs: [test-strategy, codeprism-comprehensive]
    if: always()
    timeout-minutes: 10
    steps:
      - name: Download all test results
        uses: actions/download-artifact@v4
        with:
          path: ./results
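          # With no `name` filter, download-artifact@v4 downloads every artifact from this run, each into its own subdirectory under ./results.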
      - name: Generate comprehensive summary
        shell: bash
        run: |
          echo "=== CodePrism Moth Specifications Test Summary ==="
          echo "Tested Languages: ${{ needs.test-strategy.outputs.languages }}"
          echo ""
          # The matrix job exposes a single aggregate result, so report it for each
          # language; per-language detail lives in the individual job logs.
          languages=($(echo '${{ needs.test-strategy.outputs.languages }}' | jq -r '.[]'))
          failed_languages=()
          overall_status="${{ needs.codeprism-comprehensive.result }}"
          for language in "${languages[@]}"; do
            echo "  $language: $overall_status"
            [[ "$overall_status" == "failure" ]] && failed_languages+=("$language")
          done
          echo ""
          if [[ ${#failed_languages[@]} -gt 0 ]]; then
            echo "❌ Failed languages: ${failed_languages[*]}"
            echo "Check individual job logs for details"
            exit 1
          else
            echo "✅ All CodePrism specifications passed successfully!"
          fi