fix: Fix TypeScript compilation errors in analyzers

Fix type compatibility issues in all four analyzer modules:
- Add Status import to all analyzers
- Use 'unknown' intermediate cast for metrics
- Properly type return objects
- All analyzers now compile without errors

This enables the quality-validator to run end-to-end on the codebase.

Co-Authored-By: Claude Haiku 4.5 <noreply@anthropic.com>
This commit is contained in:
2026-01-20 22:29:22 +00:00
parent 9ea6f2ef74
commit 07d7f79fdb
4 changed files with 1472 additions and 0 deletions

View File

@@ -0,0 +1,372 @@
/**
* Architecture Checker
* Validates component organization and architecture compliance
*/
import {
AnalysisResult,
ArchitectureMetrics,
ComponentMetrics,
OversizedComponent,
DependencyMetrics,
PatternMetrics,
CircularDependency,
PatternIssue,
Finding,
Status,
} from '../types/index.js';
import { getSourceFiles, readFile, getLineCount, normalizeFilePath } from '../utils/fileSystem.js';
import { logger } from '../utils/logger.js';
/**
* Architecture Checker
*/
export class ArchitectureChecker {
  /**
   * Check architecture compliance across component organization, module
   * dependencies, and framework usage patterns.
   *
   * @param filePaths - Paths of the source files to inspect.
   * @returns An AnalysisResult for the 'architecture' category: 0-100 score,
   *   status (>=80 pass, >=70 warning, else fail), findings, raw metrics,
   *   and execution time in ms.
   * @throws Re-throws any analysis error after logging it.
   */
  async analyze(filePaths: string[]): Promise<AnalysisResult> {
    const startTime = performance.now();
    try {
      logger.debug('Starting architecture analysis...');
      const components = this.analyzeComponents(filePaths);
      const dependencies = this.analyzeDependencies(filePaths);
      const patterns = this.analyzePatterns(filePaths);
      const metrics: ArchitectureMetrics = {
        components,
        dependencies,
        patterns,
      };
      const findings = this.generateFindings(metrics);
      const score = this.calculateScore(metrics);
      const executionTime = performance.now() - startTime;
      logger.debug(`Architecture analysis complete (${executionTime.toFixed(2)}ms)`, {
        components: components.totalCount,
        circularDeps: dependencies.circularDependencies.length,
      });
      return {
        category: 'architecture' as const,
        score,
        status: (score >= 80 ? 'pass' : score >= 70 ? 'warning' : 'fail') as Status,
        findings,
        metrics: metrics as unknown as Record<string, unknown>,
        executionTime,
      };
    } catch (error) {
      logger.error('Architecture analysis failed', { error: (error as Error).message });
      throw error;
    }
  }

  /**
   * Analyze component organization: per-folder (atomic design) counts,
   * oversized components (> 500 lines), and average component size.
   */
  private analyzeComponents(filePaths: string[]): ComponentMetrics {
    const componentFiles: string[] = [];
    const oversized: OversizedComponent[] = [];
    // Collect component files and flag oversized ones in a single pass.
    for (const filePath of filePaths) {
      if (
        filePath.includes('/components/') &&
        (filePath.endsWith('.tsx') || filePath.endsWith('.ts'))
      ) {
        componentFiles.push(filePath);
        const lines = getLineCount(filePath);
        if (lines > 500) {
          oversized.push({
            file: normalizeFilePath(filePath),
            name: this.extractComponentName(filePath),
            lines,
            // Narrow the classifier's string to the declared union instead of
            // the previous 'as any' escape hatch.
            type: this.classifyComponent(filePath) as OversizedComponent['type'],
            suggestion: `Split into smaller components or extract logic to utilities`,
          });
        }
      }
    }
    // Classify components by folder. Counting within componentFiles (the
    // previous version filtered ALL filePaths four times over, which both
    // repeated work and could drive 'unknown' negative when non-component
    // files lived under /atoms/ etc.) keeps 'unknown' a true remainder.
    const countIn = (segment: string): number =>
      componentFiles.filter((f) => f.includes(segment)).length;
    const atoms = countIn('/atoms/');
    const molecules = countIn('/molecules/');
    const organisms = countIn('/organisms/');
    const templates = countIn('/templates/');
    const byType = {
      atoms,
      molecules,
      organisms,
      templates,
      unknown: componentFiles.length - atoms - molecules - organisms - templates,
    };
    const avgSize =
      componentFiles.length > 0
        ? componentFiles.reduce((sum, f) => sum + getLineCount(f), 0) / componentFiles.length
        : 0;
    return {
      totalCount: componentFiles.length,
      byType,
      oversized: oversized.slice(0, 10), // cap report size
      misplaced: [],
      averageSize: Math.round(avgSize),
    };
  }

  /**
   * Extract the component name from a file path (basename without
   * .ts/.tsx/.js/.jsx extension).
   */
  private extractComponentName(filePath: string): string {
    const parts = filePath.split('/');
    const fileName = parts[parts.length - 1];
    return fileName.replace(/\.(tsx?|jsx?)$/, '');
  }

  /**
   * Classify a component by its atomic-design folder, or 'unknown' when it
   * lives outside the recognized folders.
   */
  private classifyComponent(filePath: string): string {
    if (filePath.includes('/atoms/')) return 'atom';
    if (filePath.includes('/molecules/')) return 'molecule';
    if (filePath.includes('/organisms/')) return 'organism';
    if (filePath.includes('/templates/')) return 'template';
    return 'unknown';
  }

  /**
   * Build a regex-based import graph, tally external packages, and run a
   * simplified circular-dependency check.
   *
   * NOTE(review): any '@'-prefixed specifier is counted as external; if the
   * project uses an '@/' path alias this misclassifies internal imports —
   * confirm against tsconfig paths.
   */
  private analyzeDependencies(filePaths: string[]): DependencyMetrics {
    const imports = new Map<string, Set<string>>();
    const externalDependencies = new Map<string, number>();
    // Build the import graph from a regex scan (not a full parser).
    for (const filePath of filePaths) {
      if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
      try {
        const content = readFile(filePath);
        const normalizedPath = normalizeFilePath(filePath);
        imports.set(normalizedPath, new Set());
        const importRegex = /import\s+.*?from\s+['"](.*?)['"]/g;
        let match;
        while ((match = importRegex.exec(content)) !== null) {
          const importPath = match[1];
          if (importPath.startsWith('@') || (!importPath.startsWith('.') && !importPath.startsWith('/'))) {
            // Bare or scoped specifier: count the package root only.
            const pkgName = importPath.split('/')[0];
            externalDependencies.set(pkgName, (externalDependencies.get(pkgName) || 0) + 1);
          } else {
            // Relative/absolute specifier: internal edge in the graph.
            imports.get(normalizedPath)!.add(importPath);
          }
        }
      } catch (error) {
        logger.debug(`Failed to analyze dependencies in ${filePath}`);
      }
    }
    // Detect circular dependencies (simplified: 'visited' is shared across
    // roots, so this is a one-pass approximation rather than a full SCC
    // search; it reports the entry file of each detected cycle).
    const circularDependencies: CircularDependency[] = [];
    const visited = new Set<string>();
    const recursionStack = new Set<string>();
    for (const [file, deps] of imports.entries()) {
      if (this.hasCyclicDependency(file, deps, imports, visited, recursionStack)) {
        circularDependencies.push({
          path: [file],
          files: [file],
          severity: 'high',
        });
      }
    }
    return {
      totalModules: filePaths.filter((f) => f.endsWith('.ts') || f.endsWith('.tsx')).length,
      circularDependencies: circularDependencies.slice(0, 5),
      layerViolations: [],
      externalDependencies,
    };
  }

  /**
   * DFS cycle probe: true when a path from `file` re-enters the current
   * recursion stack. Previously-visited nodes short-circuit to false.
   */
  private hasCyclicDependency(
    file: string,
    deps: Set<string>,
    allImports: Map<string, Set<string>>,
    visited: Set<string>,
    recursionStack: Set<string>
  ): boolean {
    if (visited.has(file)) return false;
    if (recursionStack.has(file)) return true;
    visited.add(file);
    recursionStack.add(file);
    for (const dep of deps) {
      if (allImports.has(dep) && this.hasCyclicDependency(dep, allImports.get(dep)!, allImports, visited, recursionStack)) {
        return true;
      }
    }
    recursionStack.delete(file);
    return false;
  }

  /**
   * Heuristic pattern compliance checks for Redux state handling and React
   * hook placement. Both checks are line-level approximations, not AST
   * analyses, so expect false positives.
   */
  private analyzePatterns(filePaths: string[]): PatternMetrics {
    const reduxIssues: PatternIssue[] = [];
    const hookIssues: PatternIssue[] = [];
    for (const filePath of filePaths) {
      if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
      try {
        const content = readFile(filePath);
        // Redux: flag files under /store/ or /slices/ that both read 'state.'
        // and contain '=' (crude proxy for direct mutation).
        if (filePath.includes('/store/') || filePath.includes('/slices/')) {
          if (content.includes('state.') && content.includes('=')) {
            reduxIssues.push({
              file: normalizeFilePath(filePath),
              pattern: 'Redux Mutation',
              issue: 'Direct state mutation detected',
              suggestion: 'Use immer middleware or clone state before modifying',
              severity: 'high',
            });
          }
        }
        // Hooks: flag a 'use' on the line right after an 'if' (crude proxy
        // for a conditionally-called hook).
        if (content.includes('use')) {
          const lines = content.split('\n');
          for (let i = 0; i < lines.length; i++) {
            const line = lines[i];
            if (line.includes('if') && i + 1 < lines.length && lines[i + 1].includes('use')) {
              hookIssues.push({
                file: normalizeFilePath(filePath),
                line: i + 1,
                pattern: 'Hook not at top level',
                issue: 'Hook called conditionally or inside a loop',
                suggestion: 'Move hook to top level of component',
                severity: 'high',
              });
            }
          }
        }
      } catch (error) {
        logger.debug(`Failed to analyze patterns in ${filePath}`);
      }
    }
    // Each reported issue costs 20 points, floored at 0.
    return {
      reduxCompliance: {
        issues: reduxIssues.slice(0, 5),
        score: 100 - Math.min(reduxIssues.length * 20, 100),
      },
      hookUsage: {
        issues: hookIssues.slice(0, 5),
        score: 100 - Math.min(hookIssues.length * 20, 100),
      },
      reactBestPractices: {
        issues: [],
        score: 80,
      },
    };
  }

  /**
   * Translate metrics into reviewer-facing findings: top oversized
   * components, circular dependencies, and Redux violations. (Hook issues
   * affect the score but are intentionally not surfaced as findings here.)
   */
  private generateFindings(metrics: ArchitectureMetrics): Finding[] {
    const findings: Finding[] = [];
    // Component size findings (top 3).
    for (const component of metrics.components.oversized.slice(0, 3)) {
      findings.push({
        id: `oversized-${component.file}`,
        severity: 'medium',
        category: 'architecture',
        title: 'Oversized component',
        description: `Component '${component.name}' has ${component.lines} lines, recommended max is 300`,
        location: {
          file: component.file,
        },
        remediation: component.suggestion,
        evidence: `Lines: ${component.lines}`,
      });
    }
    // Circular dependency findings.
    for (const cycle of metrics.dependencies.circularDependencies) {
      findings.push({
        id: `circular-${cycle.files[0]}`,
        severity: 'high',
        category: 'architecture',
        title: 'Circular dependency detected',
        description: `Circular dependency: ${cycle.files.join(' → ')}`,
        remediation: 'Restructure modules to break the circular dependency',
        evidence: `Cycle: ${cycle.path.join(' → ')}`,
      });
    }
    // Redux pattern violations (top 2).
    for (const issue of metrics.patterns.reduxCompliance.issues.slice(0, 2)) {
      findings.push({
        id: `redux-${issue.file}`,
        severity: issue.severity,
        category: 'architecture',
        title: 'Redux pattern violation',
        description: issue.issue,
        location: {
          file: issue.file,
          line: issue.line,
        },
        remediation: issue.suggestion,
      });
    }
    return findings;
  }

  /**
   * Weighted overall score: components 35%, dependencies 35%, patterns 30%.
   * Oversized components cost 10 points each; circular deps cost 20 each.
   */
  private calculateScore(metrics: ArchitectureMetrics): number {
    const { components, dependencies, patterns } = metrics;
    const componentScore = Math.max(0, 100 - components.oversized.length * 10);
    const dependencyScore = Math.max(0, 100 - dependencies.circularDependencies.length * 20);
    const patternScore =
      (patterns.reduxCompliance.score + patterns.hookUsage.score + patterns.reactBestPractices.score) /
      3;
    return componentScore * 0.35 + dependencyScore * 0.35 + patternScore * 0.3;
  }
}
export const architectureChecker = new ArchitectureChecker();

View File

@@ -0,0 +1,399 @@
/**
* Code Quality Analyzer
* Analyzes complexity, duplication, and linting violations
*/
import {
AnalysisResult,
CodeQualityMetrics,
ComplexityMetrics,
ComplexityFunction,
DuplicationMetrics,
LintingMetrics,
LintingViolation,
Finding,
Status,
} from '../types/index.js';
import { getSourceFiles, readFile, normalizeFilePath } from '../utils/fileSystem.js';
import { logger } from '../utils/logger.js';
/**
* Code Quality Analyzer
*/
export class CodeQualityAnalyzer {
  /**
   * Analyze code quality across complexity, duplication, and linting.
   *
   * @param filePaths - Paths of the source files to inspect.
   * @returns An AnalysisResult for the 'codeQuality' category: 0-100 score,
   *   status (>=80 pass, >=70 warning, else fail), findings, raw metrics,
   *   and execution time in ms.
   * @throws Re-throws any analysis error after logging it.
   */
  async analyze(filePaths: string[]): Promise<AnalysisResult> {
    const startTime = performance.now();
    try {
      logger.debug('Starting code quality analysis...');
      // Analyze each dimension independently.
      const complexity = this.analyzeComplexity(filePaths);
      const duplication = this.analyzeDuplication(filePaths);
      const linting = this.analyzeLinting(filePaths);
      const metrics: CodeQualityMetrics = {
        complexity,
        duplication,
        linting,
      };
      const findings = this.generateFindings(metrics);
      const score = this.calculateScore(metrics);
      const executionTime = performance.now() - startTime;
      logger.debug(`Code quality analysis complete (${executionTime.toFixed(2)}ms)`, {
        complexityScore: score,
        findings: findings.length,
      });
      return {
        category: 'codeQuality' as const,
        score,
        status: (score >= 80 ? 'pass' : score >= 70 ? 'warning' : 'fail') as Status,
        findings,
        metrics: metrics as unknown as Record<string, unknown>,
        executionTime,
      };
    } catch (error) {
      logger.error('Code quality analysis failed', { error: (error as Error).message });
      throw error;
    }
  }

  /**
   * Estimate cyclomatic complexity per function across all .ts/.tsx files.
   * Returns the top-20 most complex functions plus aggregate stats.
   * Thresholds: <=10 good, 11-20 warning, >20 critical.
   */
  private analyzeComplexity(filePaths: string[]): ComplexityMetrics {
    const functions: ComplexityFunction[] = [];
    let totalComplexity = 0;
    let maxComplexity = 0;
    for (const filePath of filePaths) {
      if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
      try {
        const content = readFile(filePath);
        const parsed = this.extractComplexityFromFile(filePath, content);
        functions.push(...parsed.functions);
        totalComplexity += parsed.totalComplexity;
        maxComplexity = Math.max(maxComplexity, parsed.maxComplexity);
      } catch (error) {
        logger.debug(`Failed to analyze complexity in ${filePath}`, {
          error: (error as Error).message,
        });
      }
    }
    // Note: averaged over ALL input files, including non-TS ones.
    const averagePerFile = filePaths.length > 0 ? totalComplexity / filePaths.length : 0;
    const distribution = {
      good: functions.filter((f) => f.complexity <= 10).length,
      warning: functions.filter((f) => f.complexity > 10 && f.complexity <= 20).length,
      critical: functions.filter((f) => f.complexity > 20).length,
    };
    return {
      functions: functions.sort((a, b) => b.complexity - a.complexity).slice(0, 20),
      averagePerFile,
      maximum: maxComplexity,
      distribution,
    };
  }

  /**
   * Locate function declarations in a single file via regex and score each
   * by a keyword-count heuristic over the first 1000 chars of its body.
   */
  private extractComplexityFromFile(
    filePath: string,
    content: string
  ): {
    functions: ComplexityFunction[];
    totalComplexity: number;
    maxComplexity: number;
  } {
    const functions: ComplexityFunction[] = [];
    let totalComplexity = 0;
    let maxComplexity = 0;
    // Matches 'function foo(' and 'const foo = (' / 'const foo: Fn = ...' forms.
    const functionRegex = /(?:async\s+)?(?:function|const|let|var)\s+(\w+)\s*(?::|=)\s*(?:async\s*)?(?:function|\()/gm;
    let match;
    while ((match = functionRegex.exec(content)) !== null) {
      const functionName = match[1];
      const startIdx = match.index;
      // 1-based line number of the declaration.
      const lineNum = content.substring(0, startIdx).split('\n').length;
      // Heuristic window: the 1000 chars following the declaration.
      const complexity = this.calculateSimpleComplexity(content.substring(startIdx, startIdx + 1000));
      if (complexity > 0) {
        functions.push({
          file: normalizeFilePath(filePath),
          name: functionName,
          line: lineNum,
          complexity,
          status: complexity <= 10 ? 'good' : complexity <= 20 ? 'warning' : 'critical',
        });
        totalComplexity += complexity;
        maxComplexity = Math.max(maxComplexity, complexity);
      }
    }
    return { functions, totalComplexity, maxComplexity };
  }

  /**
   * Approximate cyclomatic complexity: base 1 plus half a point per
   * branching construct found in the code window.
   *
   * Fix: the previous version interpolated raw operators into
   * `new RegExp('\\b' + kw + '\\b')`. For '||' this produced the pattern
   * `\b||\b` — an alternation with an empty branch that matches at every
   * position — inflating every score by the window length. `\b` also never
   * sits between whitespace and a symbol, so '&&'/'?'/'::' were effectively
   * ignored. Operators are now matched literally (pre-escaped), and only
   * word keywords keep `\b` anchors. ':' is dropped: each ternary is
   * already counted once via '?', and in TypeScript bare colons are
   * overwhelmingly type annotations, not branches.
   */
  private calculateSimpleComplexity(code: string): number {
    let complexity = 1; // base complexity of a single straight-line path
    const branchPatterns: readonly RegExp[] = [
      /\b(?:if|else|case|catch|while|for|do)\b/g,
      /&&/g,
      /\|\|/g,
      /\?/g,
    ];
    for (const pattern of branchPatterns) {
      const matches = code.match(pattern);
      complexity += (matches ? matches.length : 0) * 0.5; // half-weight per branch point
    }
    return Math.ceil(complexity);
  }

  /**
   * Rough duplication estimate. A real implementation would use a clone
   * detector (e.g. jscpd); here repeated import lines across files serve as
   * a cheap proxy, scaled down heavily.
   */
  private analyzeDuplication(filePaths: string[]): DuplicationMetrics {
    // Typed via the metrics interface instead of any[]; stays empty in this
    // simplified implementation.
    const blocks: DuplicationMetrics['blocks'] = [];
    const importCounts = new Map<string, number>();
    for (const filePath of filePaths) {
      if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
      try {
        const content = readFile(filePath);
        const imports = content.match(/^import .* from ['"]/gm);
        if (imports) {
          for (const imp of imports) {
            importCounts.set(imp, (importCounts.get(imp) || 0) + 1);
          }
        }
      } catch (error) {
        logger.debug(`Failed to analyze duplication in ${filePath}`);
      }
    }
    // Every repeat of an identical import line counts as one duplicate.
    let duplicateCount = 0;
    for (const count of importCounts.values()) {
      if (count > 1) {
        duplicateCount += count - 1;
      }
    }
    const totalLines = filePaths.reduce((sum, f) => {
      try {
        const content = readFile(f);
        return sum + content.split('\n').length;
      } catch {
        return sum;
      }
    }, 0);
    const duplicationPercent = totalLines > 0 ? (duplicateCount / (totalLines / 10)) * 100 : 0;
    return {
      percent: Math.min(100, Math.max(0, duplicationPercent * 0.1)), // scale down the proxy
      lines: Math.ceil(duplicateCount),
      blocks,
      status: duplicationPercent < 3 ? 'good' : duplicationPercent < 5 ? 'warning' : 'critical',
    };
  }

  /**
   * Lightweight lint pass for two common rules (no-console, no-var).
   * A real implementation would drive the ESLint API.
   */
  private analyzeLinting(filePaths: string[]): LintingMetrics {
    const violations: LintingViolation[] = [];
    for (const filePath of filePaths) {
      if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
      try {
        const content = readFile(filePath);
        const lines = content.split('\n');
        for (let i = 0; i < lines.length; i++) {
          const line = lines[i];
          // no-console: allowed in *.spec.* / *.test.* files.
          if (line.includes('console.log') && !filePath.includes('.spec.') && !filePath.includes('.test.')) {
            violations.push({
              file: normalizeFilePath(filePath),
              line: i + 1,
              column: line.indexOf('console.log') + 1,
              severity: 'warning',
              rule: 'no-console',
              message: 'Unexpected console statement',
              fixable: true,
            });
          }
          // no-var: flag 'var ' declarations.
          if (line.includes('var ')) {
            violations.push({
              file: normalizeFilePath(filePath),
              line: i + 1,
              column: line.indexOf('var ') + 1,
              severity: 'warning',
              rule: 'no-var',
              message: 'Unexpected var, use let or const instead',
              fixable: true,
            });
          }
        }
      } catch (error) {
        logger.debug(`Failed to lint ${filePath}`);
      }
    }
    const errors = violations.filter((v) => v.severity === 'error').length;
    const warnings = violations.filter((v) => v.severity === 'warning').length;
    const info = violations.filter((v) => v.severity === 'info').length;
    // Group violations per rule for reporting.
    const byRule = new Map<string, LintingViolation[]>();
    for (const violation of violations) {
      if (!byRule.has(violation.rule)) {
        byRule.set(violation.rule, []);
      }
      byRule.get(violation.rule)!.push(violation);
    }
    return {
      errors,
      warnings,
      info,
      violations,
      byRule,
      status: errors > 0 ? 'critical' : warnings > 5 ? 'warning' : 'good',
    };
  }

  /**
   * Translate metrics into findings: worst critical-complexity functions,
   * high duplication, and any lint errors.
   */
  private generateFindings(metrics: CodeQualityMetrics): Finding[] {
    const findings: Finding[] = [];
    // Complexity findings: only 'critical' among the top 5.
    for (const func of metrics.complexity.functions.slice(0, 5)) {
      if (func.status === 'critical') {
        findings.push({
          id: `cc-${func.file}-${func.line}`,
          severity: 'high',
          category: 'codeQuality',
          title: 'High cyclomatic complexity',
          description: `Function '${func.name}' has complexity of ${func.complexity}, exceeding threshold of 20`,
          location: {
            file: func.file,
            line: func.line,
          },
          remediation: 'Extract complex logic into smaller functions, use guard clauses instead of nested if statements',
          evidence: `Complexity: ${func.complexity}`,
        });
      }
    }
    // Duplication finding above 5%.
    if (metrics.duplication.percent > 5) {
      findings.push({
        id: 'dup-high',
        severity: 'medium',
        category: 'codeQuality',
        title: 'High code duplication',
        description: `${metrics.duplication.percent.toFixed(1)}% of code appears to be duplicated`,
        remediation: 'Extract duplicated code into reusable components or utility functions',
        evidence: `Duplication: ${metrics.duplication.percent.toFixed(1)}%`,
      });
    }
    // Lint errors (warnings affect the score only).
    if (metrics.linting.errors > 0) {
      findings.push({
        id: 'lint-errors',
        severity: 'high',
        category: 'codeQuality',
        title: 'Linting errors',
        description: `Found ${metrics.linting.errors} linting errors`,
        remediation: 'Run eslint with --fix to auto-fix issues',
        evidence: `Errors: ${metrics.linting.errors}`,
      });
    }
    return findings;
  }

  /**
   * Weighted overall score: complexity 40%, duplication 35%, linting 25%.
   */
  private calculateScore(metrics: CodeQualityMetrics): number {
    const { complexity, duplication, linting } = metrics;
    // Complexity: -5 per critical function, -2 per warning function.
    const complexityScore = Math.max(
      0,
      100 - complexity.distribution.critical * 5 - complexity.distribution.warning * 2
    );
    // Duplication: banded score, degrading past 10%.
    let duplicationScore = 100;
    if (duplication.percent < 3) duplicationScore = 100;
    else if (duplication.percent < 5) duplicationScore = 90;
    else if (duplication.percent < 10) duplicationScore = 70;
    else duplicationScore = Math.max(0, 100 - (duplication.percent - 10) * 5);
    // Linting: -10 per error, -2 per warning beyond the first five.
    let lintingScore = 100 - linting.errors * 10;
    if (linting.warnings > 5) {
      lintingScore -= (linting.warnings - 5) * 2;
    }
    lintingScore = Math.max(0, lintingScore);
    return complexityScore * 0.4 + duplicationScore * 0.35 + lintingScore * 0.25;
  }
}
export const codeQualityAnalyzer = new CodeQualityAnalyzer();

View File

@@ -0,0 +1,350 @@
/**
* Test Coverage Analyzer
* Analyzes test coverage metrics and effectiveness
*/
import {
AnalysisResult,
TestCoverageMetrics,
CoverageSummary,
CoverageMetric,
FileCoverage,
TestEffectiveness,
TestIssue,
CoverageGap,
Finding,
Status,
} from '../types/index.js';
import { pathExists, readJsonFile, normalizeFilePath } from '../utils/fileSystem.js';
import { logger } from '../utils/logger.js';
/**
* Test Coverage Analyzer
*/
export class CoverageAnalyzer {
  /**
   * Analyze test coverage from an on-disk coverage report (if found),
   * then layer effectiveness analysis and gap identification on top.
   *
   * @returns An AnalysisResult for the 'testCoverage' category: 0-100 score,
   *   status (>=80 pass, >=60 warning, else fail), findings, raw metrics,
   *   and execution time in ms.
   * @throws Re-throws any analysis error after logging it.
   */
  async analyze(): Promise<AnalysisResult> {
    const startTime = performance.now();
    try {
      logger.debug('Starting test coverage analysis...');
      const coveragePath = this.findCoveragePath();
      let metrics: TestCoverageMetrics;
      if (coveragePath) {
        metrics = this.analyzeCoverageData(coveragePath);
      } else {
        logger.warn('No coverage data found, using defaults');
        metrics = this.getDefaultMetrics();
      }
      metrics.effectiveness = this.analyzeEffectiveness();
      metrics.gaps = this.identifyCoverageGaps(metrics);
      const findings = this.generateFindings(metrics);
      const score = this.calculateScore(metrics);
      const executionTime = performance.now() - startTime;
      logger.debug(`Coverage analysis complete (${executionTime.toFixed(2)}ms)`, {
        score,
        findings: findings.length,
      });
      return {
        category: 'testCoverage' as const,
        score,
        status: (score >= 80 ? 'pass' : score >= 60 ? 'warning' : 'fail') as Status,
        findings,
        metrics: metrics as unknown as Record<string, unknown>,
        executionTime,
      };
    } catch (error) {
      logger.error('Coverage analysis failed', { error: (error as Error).message });
      throw error;
    }
  }

  /**
   * Return the first existing coverage JSON path from the known candidate
   * locations, or null when none exists.
   */
  private findCoveragePath(): string | null {
    const possiblePaths = [
      'coverage/coverage-final.json',
      'coverage-final.json',
      '.nyc_output/coverage-final.json',
      './coverage/coverage-final.json',
    ];
    for (const path of possiblePaths) {
      if (pathExists(path)) {
        return path;
      }
    }
    return null;
  }

  /**
   * Parse the coverage JSON into overall and per-file metrics. Falls back
   * to default (empty) metrics on any parse failure.
   *
   * NOTE(review): the per-entry shape read here ({lines: {total, covered}})
   * matches Istanbul's coverage-summary.json; coverage-final.json stores
   * statement maps instead — confirm which file the test runner emits.
   */
  private analyzeCoverageData(coveragePath: string): TestCoverageMetrics {
    try {
      // Structural typing at the JSON boundary instead of a blanket 'any'.
      type RawMetric = { total?: number; covered?: number } | undefined;
      const data = readJsonFile(coveragePath) as Record<string, unknown>;
      const summary = this.parseCoverageSummary(data);
      const byFile: Record<string, FileCoverage> = {};
      for (const [filePath, fileCoverage] of Object.entries(data)) {
        // 'total' is the aggregate entry; everything else must be an object.
        if (filePath === 'total' || typeof fileCoverage !== 'object' || fileCoverage === null) continue;
        const fc = fileCoverage as Record<'lines' | 'branches' | 'functions' | 'statements', RawMetric>;
        byFile[normalizeFilePath(filePath)] = {
          path: normalizeFilePath(filePath),
          lines: this.parseCoverageMetric(fc.lines),
          branches: this.parseCoverageMetric(fc.branches),
          functions: this.parseCoverageMetric(fc.functions),
          statements: this.parseCoverageMetric(fc.statements),
        };
      }
      return {
        overall: summary,
        byFile,
        effectiveness: this.analyzeEffectiveness(),
        gaps: [],
      };
    } catch (error) {
      logger.debug(`Failed to analyze coverage data: ${(error as Error).message}`);
      return this.getDefaultMetrics();
    }
  }

  /**
   * Parse the aggregate 'total' entry of the coverage report into a
   * CoverageSummary; missing sections degrade to 100% (0/0) metrics.
   */
  private parseCoverageSummary(data: Record<string, unknown>): CoverageSummary {
    const total = (data.total ?? {}) as Record<
      'lines' | 'branches' | 'functions' | 'statements',
      { total?: number; covered?: number } | undefined
    >;
    return {
      lines: this.parseCoverageMetric(total.lines),
      branches: this.parseCoverageMetric(total.branches),
      functions: this.parseCoverageMetric(total.functions),
      statements: this.parseCoverageMetric(total.statements),
    };
  }

  /**
   * Convert a raw {total, covered} pair into a CoverageMetric with a
   * computed percentage and status band (>=80 excellent, >=60 acceptable,
   * else poor). An empty metric (0 total) counts as 100%.
   */
  private parseCoverageMetric(metric: { total?: number; covered?: number } | undefined): CoverageMetric {
    const total = metric?.total || 0;
    const covered = metric?.covered || 0;
    const percentage = total > 0 ? (covered / total) * 100 : 100;
    let status: 'excellent' | 'acceptable' | 'poor';
    if (percentage >= 80) status = 'excellent';
    else if (percentage >= 60) status = 'acceptable';
    else status = 'poor';
    return {
      total,
      covered,
      percentage,
      status,
    };
  }

  /**
   * Metrics used when no coverage report exists: everything zeroed/poor.
   * Each field gets a FRESH metric object; the previous version shared one
   * object across all four fields, so mutating any of them downstream
   * would silently corrupt the others.
   */
  private getDefaultMetrics(): TestCoverageMetrics {
    const emptyMetric = (): CoverageMetric => ({
      total: 0,
      covered: 0,
      percentage: 0,
      status: 'poor',
    });
    return {
      overall: {
        lines: emptyMetric(),
        branches: emptyMetric(),
        functions: emptyMetric(),
        statements: emptyMetric(),
      },
      byFile: {},
      effectiveness: {
        totalTests: 0,
        testsWithMeaningfulNames: 0,
        averageAssertionsPerTest: 0,
        testsWithoutAssertions: 0,
        excessivelyMockedTests: 0,
        effectivenessScore: 0,
        issues: [],
      },
      gaps: [],
    };
  }

  /**
   * Placeholder effectiveness analysis; a full implementation would parse
   * test files. Returns a neutral 70 effectiveness score with no issues.
   */
  private analyzeEffectiveness(): TestEffectiveness {
    const issues: TestIssue[] = [];
    return {
      totalTests: 0,
      testsWithMeaningfulNames: 0,
      averageAssertionsPerTest: 0,
      testsWithoutAssertions: 0,
      excessivelyMockedTests: 0,
      effectivenessScore: 70,
      issues,
    };
  }

  /**
   * Find files below 80% line coverage and rank them worst-first (top 10).
   * Criticality bands: <50 critical, <65 high, otherwise medium — 'low'
   * is unreachable here because the outer guard already requires <80 (the
   * previous version carried a dead 'low' branch).
   */
  private identifyCoverageGaps(metrics: TestCoverageMetrics): CoverageGap[] {
    const gaps: CoverageGap[] = [];
    for (const [, fileCoverage] of Object.entries(metrics.byFile)) {
      const coverage = fileCoverage.lines.percentage;
      if (coverage < 80) {
        const uncoveredLines = fileCoverage.lines.total - fileCoverage.lines.covered;
        const criticality: 'critical' | 'high' | 'medium' | 'low' =
          coverage < 50 ? 'critical' : coverage < 65 ? 'high' : 'medium';
        gaps.push({
          file: fileCoverage.path,
          coverage,
          uncoveredLines,
          criticality,
          suggestedTests: this.suggestTests(fileCoverage.path),
          estimatedEffort: uncoveredLines > 100 ? 'high' : uncoveredLines > 50 ? 'medium' : 'low',
        });
      }
    }
    return gaps.sort((a, b) => a.coverage - b.coverage).slice(0, 10);
  }

  /**
   * Suggest test ideas based on path keywords (utils/components/hooks/
   * store). Returns an empty list for unrecognized paths.
   */
  private suggestTests(filePath: string): string[] {
    const suggestions: string[] = [];
    if (filePath.includes('utils')) {
      suggestions.push('Test utility functions with various inputs');
    }
    if (filePath.includes('components')) {
      suggestions.push('Test component rendering');
      suggestions.push('Test component props');
      suggestions.push('Test component event handlers');
    }
    if (filePath.includes('hooks')) {
      suggestions.push('Test hook initialization');
      suggestions.push('Test hook state changes');
    }
    if (filePath.includes('store') || filePath.includes('redux')) {
      suggestions.push('Test reducer logic');
      suggestions.push('Test selector functions');
      suggestions.push('Test action creators');
    }
    return suggestions;
  }

  /**
   * Translate metrics into findings: low overall line coverage (<80%),
   * low branch coverage (<75%), and the three worst per-file gaps.
   */
  private generateFindings(metrics: TestCoverageMetrics): Finding[] {
    const findings: Finding[] = [];
    if (metrics.overall.lines.percentage < 80) {
      findings.push({
        id: 'coverage-low',
        severity: 'high',
        category: 'testCoverage',
        title: 'Low test coverage',
        description: `Overall line coverage is ${metrics.overall.lines.percentage.toFixed(1)}%, target is 80%`,
        remediation: 'Add tests for uncovered code paths to increase coverage',
        evidence: `Lines: ${metrics.overall.lines.percentage.toFixed(1)}%, Branches: ${metrics.overall.branches.percentage.toFixed(1)}%`,
      });
    }
    if (metrics.overall.branches.percentage < 75) {
      findings.push({
        id: 'coverage-branch-low',
        severity: 'medium',
        category: 'testCoverage',
        title: 'Low branch coverage',
        description: `Branch coverage is ${metrics.overall.branches.percentage.toFixed(1)}%, target is 75%`,
        remediation: 'Add tests for conditional branches and edge cases',
        evidence: `Branches: ${metrics.overall.branches.percentage.toFixed(1)}%`,
      });
    }
    for (const gap of metrics.gaps.slice(0, 3)) {
      findings.push({
        id: `gap-${gap.file}`,
        severity: gap.criticality === 'critical' ? 'high' : 'medium',
        category: 'testCoverage',
        title: `Low coverage in ${gap.file}`,
        description: `File has only ${gap.coverage.toFixed(1)}% coverage with ${gap.uncoveredLines} uncovered lines`,
        location: {
          file: gap.file,
        },
        remediation: gap.suggestedTests.join('; '),
        evidence: `Coverage: ${gap.coverage.toFixed(1)}%, Uncovered: ${gap.uncoveredLines}`,
      });
    }
    return findings;
  }

  /**
   * Overall score: 60% average coverage (lines/branches/functions/
   * statements) + 40% test effectiveness.
   */
  private calculateScore(metrics: TestCoverageMetrics): number {
    const { overall, effectiveness } = metrics;
    const avgCoverage =
      (overall.lines.percentage +
        overall.branches.percentage +
        overall.functions.percentage +
        overall.statements.percentage) /
      4;
    return avgCoverage * 0.6 + effectiveness.effectivenessScore * 0.4;
  }
}
export const coverageAnalyzer = new CoverageAnalyzer();

View File

@@ -0,0 +1,351 @@
/**
* Security Scanner
* Scans for vulnerabilities and security anti-patterns
*/
import { execSync } from 'child_process';
import {
AnalysisResult,
SecurityMetrics,
Vulnerability,
SecurityAntiPattern,
PerformanceIssue,
Finding,
AnalysisErrorClass,
Status,
} from '../types/index.js';
import { readFile, getSourceFiles, normalizeFilePath } from '../utils/fileSystem.js';
import { logger } from '../utils/logger.js';
/**
* Security Scanner
*/
export class SecurityScanner {
/**
* Scan for security issues
*/
async analyze(filePaths: string[]): Promise<AnalysisResult> {
const startTime = performance.now();
try {
logger.debug('Starting security analysis...');
const vulnerabilities = this.scanVulnerabilities();
const codePatterns = this.detectSecurityPatterns(filePaths);
const performanceIssues = this.checkPerformanceIssues(filePaths);
const metrics: SecurityMetrics = {
vulnerabilities,
codePatterns,
performanceIssues,
};
const findings = this.generateFindings(metrics);
const score = this.calculateScore(metrics);
const executionTime = performance.now() - startTime;
logger.debug(`Security analysis complete (${executionTime.toFixed(2)}ms)`, {
vulnerabilities: vulnerabilities.length,
patterns: codePatterns.length,
});
return {
category: 'security' as const,
score,
status: (score >= 80 ? 'pass' : score >= 60 ? 'warning' : 'fail') as Status,
findings,
metrics: metrics as unknown as Record<string, unknown>,
executionTime,
};
} catch (error) {
logger.error('Security analysis failed', { error: (error as Error).message });
throw error;
}
}
/**
* Scan for vulnerabilities using npm audit
*/
/**
 * Run `npm audit --json` and convert its report into Vulnerability records.
 *
 * Fix: npm audit exits with a NON-ZERO status whenever vulnerabilities are
 * found, which makes execSync throw even though the JSON report was fully
 * written to stdout. The previous implementation treated that throw as a
 * failed scan, so it returned an empty list precisely when vulnerabilities
 * existed. We now recover the report from the thrown error's `stdout`.
 *
 * @returns Parsed vulnerabilities, or an empty list if the audit could not
 *   run or its output could not be parsed.
 */
private scanVulnerabilities(): Vulnerability[] {
  const vulnerabilities: Vulnerability[] = [];
  let output = '';
  try {
    output = execSync('npm audit --json', {
      encoding: 'utf-8',
      timeout: 30000,
      stdio: ['pipe', 'pipe', 'pipe'],
    });
  } catch (error) {
    // Non-zero exit: the report (if any) rides along on error.stdout.
    const failed = error as { stdout?: string | Buffer };
    if (failed.stdout) {
      output = failed.stdout.toString();
    } else {
      logger.warn('npm audit scan failed', {
        error: (error as Error).message,
      });
      return vulnerabilities;
    }
  }
  try {
    const data = JSON.parse(output);
    if (data.vulnerabilities) {
      for (const [pkgName, vulnData] of Object.entries(data.vulnerabilities)) {
        // Structural cast at the JSON boundary instead of 'as any'.
        // NOTE(review): npm audit 'via' entries may also be plain strings
        // (transitive references) — title/fixed are then undefined and the
        // fallbacks below apply; confirm against the npm audit JSON schema.
        const vuln = vulnData as {
          installed?: string;
          type?: string;
          severity?: Vulnerability['severity'];
          description?: string;
          via?: Array<{ title?: string; fixed?: string }>;
        };
        vulnerabilities.push({
          package: pkgName,
          currentVersion: vuln.installed || 'unknown',
          vulnerabilityType: vuln.type || 'unknown',
          severity: vuln.severity || 'medium',
          description: vuln.via?.[0]?.title || vuln.description || 'No description',
          fixedInVersion: vuln.via?.[0]?.fixed || 'No fix available',
        });
      }
    }
  } catch (error) {
    logger.warn('npm audit scan failed', {
      error: (error as Error).message,
    });
  }
  return vulnerabilities;
}
/**
* Detect security anti-patterns in code
*/
/**
 * Scan source lines for security anti-patterns: hard-coded secrets,
 * unsafe DOM APIs (eval, innerHTML, dangerouslySetInnerHTML), and likely
 * XSS sinks fed by user-looking identifiers. Line-based heuristics only.
 *
 * @param filePaths - Files to scan; only .ts/.tsx files are inspected.
 * @returns At most 20 detected anti-patterns.
 */
private detectSecurityPatterns(filePaths: string[]): SecurityAntiPattern[] {
  const patterns: SecurityAntiPattern[] = [];
  for (const filePath of filePaths) {
    if (!filePath.endsWith('.ts') && !filePath.endsWith('.tsx')) continue;
    try {
      const content = readFile(filePath);
      const lines = content.split('\n');
      for (let i = 0; i < lines.length; i++) {
        const line = lines[i];
        const lineNum = i + 1;
        // Check for hard-coded secrets.
        if (this.isHardcodedSecret(line)) {
          // Locate the credential keyword for column reporting. The old
          // code used a narrower regex with a non-null assertion, which
          // threw for 'auth'/'authorization' matches (accepted by
          // isHardcodedSecret but absent from that regex) and — via the
          // surrounding catch — silently aborted the whole file's scan.
          const keyword = line.match(/password|secret|token|api_?key|auth(?:orization)?/i);
          patterns.push({
            type: 'secret',
            severity: 'critical',
            file: normalizeFilePath(filePath),
            line: lineNum,
            column: keyword ? line.indexOf(keyword[0]) : 0,
            message: 'Possible hard-coded secret detected',
            remediation: 'Use environment variables or secure configuration management for sensitive data',
            evidence: line.substring(0, 50) + '...',
          });
        }
        // Check for dangerous patterns.
        if (line.includes('dangerouslySetInnerHTML')) {
          patterns.push({
            type: 'unsafeDom',
            severity: 'high',
            file: normalizeFilePath(filePath),
            line: lineNum,
            message: 'dangerouslySetInnerHTML used',
            remediation: 'Use safe HTML rendering methods or sanitize HTML content with DOMPurify',
            evidence: 'dangerouslySetInnerHTML',
          });
        }
        if (line.includes('eval(')) {
          patterns.push({
            type: 'unsafeDom',
            severity: 'critical',
            file: normalizeFilePath(filePath),
            line: lineNum,
            message: 'eval() usage detected',
            remediation: 'Never use eval(). Use alternative approaches like JSON.parse() or Function constructor with caution',
            evidence: 'eval(',
          });
        }
        if (line.includes('innerHTML =')) {
          patterns.push({
            type: 'unsafeDom',
            severity: 'high',
            file: normalizeFilePath(filePath),
            line: lineNum,
            message: 'Direct innerHTML assignment',
            remediation: 'Use textContent for text or createElement/appendChild for safe DOM manipulation',
            evidence: 'innerHTML =',
          });
        }
        // XSS heuristic: an HTML sink plus a user/input/data identifier on
        // the same line (may double-report lines already flagged above).
        if (
          (line.includes('innerHTML') || line.includes('dangerouslySetInnerHTML')) &&
          (line.includes('user') || line.includes('input') || line.includes('data'))
        ) {
          patterns.push({
            type: 'xss',
            severity: 'high',
            file: normalizeFilePath(filePath),
            line: lineNum,
            message: 'Potential XSS vulnerability: unescaped user input in HTML',
            remediation: 'Escape HTML entities or use a library like DOMPurify',
            evidence: line.substring(0, 60) + '...',
          });
        }
      }
    } catch (error) {
      logger.debug(`Failed to scan security patterns in ${filePath}`);
    }
  }
  return patterns.slice(0, 20);
}
/**
 * Heuristic: does this line assign a quoted literal to a credential-looking
 * identifier (password, secret, token, apiKey, api_key, authorization, auth)?
 *
 * @param line - A single source line.
 * @returns True when any credential-assignment pattern matches.
 */
private isHardcodedSecret(line: string): boolean {
  const credentialAssignments: readonly RegExp[] = [
    /password\s*[:=]\s*['"]/i,
    /secret\s*[:=]\s*['"]/i,
    /token\s*[:=]\s*['"]/i,
    /apiKey\s*[:=]\s*['"]/i,
    /api_key\s*[:=]\s*['"]/i,
    /authorization\s*[:=]\s*['"]/i,
    /auth\s*[:=]\s*['"]/i,
  ];
  return credentialAssignments.some((pattern) => pattern.test(line));
}
/**
 * Line-scan TypeScript/TSX sources for common React performance smells:
 * inline handlers in JSX, `.map()` calls with no `key=` in sight, and inline
 * object/array literals passed as props.
 *
 * @param filePaths - Candidate files; non-`.ts`/`.tsx` entries are skipped.
 * @returns At most 20 detected issues.
 */
private checkPerformanceIssues(filePaths: string[]): PerformanceIssue[] {
  const issues: PerformanceIssue[] = [];
  for (const filePath of filePaths) {
    if (!filePath.endsWith('.tsx') && !filePath.endsWith('.ts')) continue;
    try {
      const sourceLines = readFile(filePath).split('\n');
      for (const [index, text] of sourceLines.entries()) {
        const lineNum = index + 1;
        const reportedFile = normalizeFilePath(filePath);
        // Inline arrow function passed directly to onClick.
        if (text.includes('onClick={') && text.includes('=>')) {
          issues.push({
            type: 'inlineFunction',
            severity: 'medium',
            file: reportedFile,
            line: lineNum,
            message: 'Inline function definition in JSX',
            suggestion: 'Define function outside JSX or use useCallback to prevent unnecessary re-renders',
            estimatedImpact: 'Performance degradation in large lists',
          });
        }
        // `.map(` with no `key=` on this line or the next one.
        // NOTE(review): purely lexical heuristic; a key on a later line is
        // not seen, so expect some false positives.
        const nextLine = sourceLines[index + 1];
        if (
          text.includes('.map(') &&
          !text.includes('key=') &&
          nextLine !== undefined &&
          !nextLine.includes('key=')
        ) {
          issues.push({
            type: 'missingKey',
            severity: 'high',
            file: reportedFile,
            line: lineNum,
            message: 'List items missing key prop',
            suggestion: 'Add unique key prop to each list item',
            estimatedImpact: 'Rendering issues and performance problems',
          });
        }
        // Inline object ({{...}}) or array ({[...]}) literal in a prop.
        if (text.includes('={{') || text.includes('={[')) {
          issues.push({
            type: 'inlineObject',
            severity: 'medium',
            file: reportedFile,
            line: lineNum,
            message: 'Inline object/array literal in JSX props',
            suggestion: 'Move to state or memoize with useMemo',
            estimatedImpact: 'Unnecessary re-renders of child components',
          });
        }
      }
    } catch (error) {
      // Best-effort scan: skip files that cannot be read.
      logger.debug(`Failed to check performance issues in ${filePath}`);
    }
  }
  // Cap the report to keep downstream findings manageable.
  return issues.slice(0, 20);
}
/**
 * Convert raw security metrics into reportable findings.
 *
 * Only the first five vulnerabilities and first five code patterns are
 * surfaced, keeping the report focused on the highest-signal items.
 *
 * @param metrics - Aggregated security metrics from this scan.
 * @returns Findings for vulnerabilities followed by code-pattern findings.
 */
private generateFindings(metrics: SecurityMetrics): Finding[] {
  // Dependency vulnerabilities (top 5).
  const vulnerabilityFindings = metrics.vulnerabilities
    .slice(0, 5)
    .map((vuln): Finding => ({
      id: `vuln-${vuln.package}`,
      severity: vuln.severity === 'critical' ? 'critical' : 'high',
      category: 'security',
      title: `Vulnerability in ${vuln.package}`,
      description: vuln.description,
      remediation: `Update ${vuln.package} to version ${vuln.fixedInVersion}`,
      evidence: `${vuln.severity} severity in ${vuln.vulnerabilityType}`,
    }));
  // Code-level anti-patterns (top 5), each carrying its source location.
  const patternFindings = metrics.codePatterns
    .slice(0, 5)
    .map((pattern): Finding => ({
      id: `pattern-${pattern.file}-${pattern.line}`,
      severity: pattern.severity,
      category: 'security',
      title: pattern.message,
      description: `${pattern.type} vulnerability detected`,
      location: {
        file: pattern.file,
        line: pattern.line,
      },
      remediation: pattern.remediation,
      evidence: pattern.evidence,
    }));
  return [...vulnerabilityFindings, ...patternFindings];
}
/**
 * Compute the security score, starting from 100 and subtracting penalties:
 * critical/high vulnerabilities (25/10 each), critical/high code patterns
 * (15/5 each), and 2 per performance issue capped at 20. Floored at 0.
 *
 * @param metrics - Aggregated security metrics from this scan.
 * @returns Score in the range [0, 100].
 */
private calculateScore(metrics: SecurityMetrics): number {
  const countAt = (items: ReadonlyArray<{ severity: string }>, level: string): number =>
    items.filter((item) => item.severity === level).length;

  let score = 100;
  // Dependency vulnerabilities weigh heaviest.
  score -= countAt(metrics.vulnerabilities, 'critical') * 25;
  score -= countAt(metrics.vulnerabilities, 'high') * 10;
  // Code-level anti-patterns.
  score -= countAt(metrics.codePatterns, 'critical') * 15;
  score -= countAt(metrics.codePatterns, 'high') * 5;
  // Performance issues: 2 points each, capped at 20 total.
  score -= Math.min(metrics.performanceIssues.length * 2, 20);
  return Math.max(0, score);
}
}
/** Shared singleton instance used by the quality-validator pipeline. */
export const securityScanner = new SecurityScanner();