mirror of
https://github.com/johndoe6345789/low-code-react-app-b.git
synced 2026-05-05 10:59:35 +00:00
Generated by Spark: getting masses of "bad gateway" errors
This commit is contained in:
@@ -1,7 +1,8 @@
|
||||
import { useState, useEffect, useCallback } from 'react'
|
||||
import { useState, useCallback } from 'react'
|
||||
import { ProjectFile } from '@/types/project'
|
||||
import { CodeError } from '@/types/errors'
|
||||
import { ErrorRepairService } from '@/lib/error-repair-service'
|
||||
import { scanRateLimiter } from '@/lib/rate-limiter'
|
||||
|
||||
export function useAutoRepair(
|
||||
files: ProjectFile[],
|
||||
@@ -15,18 +16,26 @@ export function useAutoRepair(
|
||||
|
||||
setIsScanning(true)
|
||||
try {
|
||||
const allErrors: CodeError[] = []
|
||||
|
||||
for (const file of files) {
|
||||
if (file && file.content) {
|
||||
const fileErrors = await ErrorRepairService.detectErrors(file)
|
||||
if (Array.isArray(fileErrors)) {
|
||||
allErrors.push(...fileErrors)
|
||||
const result = await scanRateLimiter.throttle(
|
||||
'error-scan',
|
||||
async () => {
|
||||
const allErrors: CodeError[] = []
|
||||
|
||||
for (const file of files) {
|
||||
if (file && file.content) {
|
||||
const fileErrors = await ErrorRepairService.detectErrors(file)
|
||||
if (Array.isArray(fileErrors)) {
|
||||
allErrors.push(...fileErrors)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return allErrors
|
||||
},
|
||||
'low'
|
||||
)
|
||||
|
||||
setErrors(allErrors)
|
||||
setErrors(result || [])
|
||||
} catch (error) {
|
||||
console.error('Auto-scan failed:', error)
|
||||
setErrors([])
|
||||
@@ -35,16 +44,6 @@ export function useAutoRepair(
|
||||
}
|
||||
}, [files, enabled])
|
||||
|
||||
useEffect(() => {
|
||||
if (enabled) {
|
||||
const timeoutId = setTimeout(() => {
|
||||
scanFiles()
|
||||
}, 2000)
|
||||
|
||||
return () => clearTimeout(timeoutId)
|
||||
}
|
||||
}, [files, enabled, scanFiles])
|
||||
|
||||
return {
|
||||
errors: Array.isArray(errors) ? errors : [],
|
||||
isScanning,
|
||||
|
||||
+68
-21
@@ -1,5 +1,6 @@
|
||||
// @ts-nocheck
|
||||
import { PrismaModel, ComponentNode, ThemeConfig, ProjectFile } from '@/types/project'
|
||||
import { ProtectedLLMService } from './protected-llm-service'
|
||||
|
||||
export class AIService {
|
||||
static async generateComponent(description: string): Promise<ComponentNode | null> {
|
||||
@@ -21,9 +22,16 @@ Return a valid JSON object with a single property "component" containing the com
|
||||
|
||||
Make sure to use appropriate Material UI components and props. Keep the structure clean and semantic.`
|
||||
|
||||
const response = await window.spark.llm(prompt, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
return parsed.component
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: true, priority: 'medium', category: 'generate-component' }
|
||||
)
|
||||
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return parsed.component
|
||||
}
|
||||
return null
|
||||
} catch (error) {
|
||||
console.error('AI component generation failed:', error)
|
||||
return null
|
||||
@@ -66,9 +74,16 @@ Return a valid JSON object with a single property "model" containing the model s
|
||||
|
||||
Include an id field with uuid() default. Add createdAt and updatedAt DateTime fields with @default(now()) and @updatedAt. Use appropriate field types and relationships.`
|
||||
|
||||
const response = await window.spark.llm(prompt, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
return parsed.model
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: true, priority: 'medium', category: 'generate-model' }
|
||||
)
|
||||
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return parsed.model
|
||||
}
|
||||
return null
|
||||
} catch (error) {
|
||||
console.error('AI model generation failed:', error)
|
||||
return null
|
||||
@@ -95,8 +110,12 @@ Generate clean, production-ready code following Next.js 14 and Material UI best
|
||||
|
||||
Return ONLY the code without any markdown formatting or explanations.`
|
||||
|
||||
const code = await window.spark.llm(prompt, 'gpt-4o', false)
|
||||
return code.trim()
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: false, priority: 'medium', category: 'generate-code' }
|
||||
)
|
||||
|
||||
return result ? result.trim() : null
|
||||
} catch (error) {
|
||||
console.error('AI code generation failed:', error)
|
||||
return null
|
||||
@@ -112,8 +131,12 @@ ${code}
|
||||
|
||||
Return ONLY the improved code without any markdown formatting or explanations.`
|
||||
|
||||
const improved = await window.spark.llm(prompt, 'gpt-4o', false)
|
||||
return improved.trim()
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: false, priority: 'high', category: 'improve-code' }
|
||||
)
|
||||
|
||||
return result ? result.trim() : null
|
||||
} catch (error) {
|
||||
console.error('AI code improvement failed:', error)
|
||||
return null
|
||||
@@ -145,9 +168,16 @@ Return a valid JSON object with a single property "theme" containing:
|
||||
|
||||
Choose colors that match the description and ensure good contrast. Use common font stacks.`
|
||||
|
||||
const response = await window.spark.llm(prompt, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
return parsed.theme
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: true, priority: 'low', category: 'generate-theme' }
|
||||
)
|
||||
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return parsed.theme
|
||||
}
|
||||
return null
|
||||
} catch (error) {
|
||||
console.error('AI theme generation failed:', error)
|
||||
return null
|
||||
@@ -168,9 +198,16 @@ Return a valid JSON object with a single property "fields" containing an array o
|
||||
|
||||
Suggest 3-5 common fields that would be useful for this model type. Use camelCase naming.`
|
||||
|
||||
const response = await window.spark.llm(prompt, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
return parsed.fields
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: true, priority: 'low', category: 'suggest-fields' }
|
||||
)
|
||||
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return parsed.fields
|
||||
}
|
||||
return null
|
||||
} catch (error) {
|
||||
console.error('AI field suggestion failed:', error)
|
||||
return null
|
||||
@@ -185,8 +222,12 @@ ${code}
|
||||
|
||||
Provide a clear, concise explanation suitable for developers learning the codebase.`
|
||||
|
||||
const explanation = await window.spark.llm(prompt, 'gpt-4o', false)
|
||||
return explanation.trim()
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: false, priority: 'low', category: 'explain-code', model: 'gpt-4o-mini' }
|
||||
)
|
||||
|
||||
return result ? result.trim() : null
|
||||
} catch (error) {
|
||||
console.error('AI code explanation failed:', error)
|
||||
return null
|
||||
@@ -240,9 +281,15 @@ Return a valid JSON object with properties "files", "models", and "theme":
|
||||
|
||||
Create 2-4 essential files for the app structure. Include appropriate Prisma models. Design a cohesive theme.`
|
||||
|
||||
const response = await window.spark.llm(prompt, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
return parsed
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
prompt,
|
||||
{ jsonMode: true, priority: 'high', category: 'generate-app' }
|
||||
)
|
||||
|
||||
if (result) {
|
||||
return JSON.parse(result)
|
||||
}
|
||||
return null
|
||||
} catch (error) {
|
||||
console.error('AI app generation failed:', error)
|
||||
return null
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
// @ts-nocheck
|
||||
import { CodeError, ErrorRepairResult } from '@/types/errors'
|
||||
import { ProjectFile } from '@/types/project'
|
||||
import { ProtectedLLMService } from './protected-llm-service'
|
||||
|
||||
/**
|
||||
* ErrorRepairService - AI-powered code error detection and repair
|
||||
@@ -176,7 +178,8 @@ export class ErrorRepairService {
|
||||
.map(err => `Line ${err.line || 'unknown'}: ${err.message} - "${err.code || 'N/A'}"`)
|
||||
.join('\n')
|
||||
|
||||
const promptText = `You are a code repair assistant. Fix the following errors in this code:
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
window.spark.llmPrompt`You are a code repair assistant. Fix the following errors in this code:
|
||||
|
||||
File: ${file.name} (${file.language})
|
||||
|
||||
@@ -202,16 +205,23 @@ Rules:
|
||||
- Replace "var" with "const" or "let"
|
||||
- Maintain code functionality and structure
|
||||
- Keep the same imports style and formatting
|
||||
- Return the COMPLETE file content, not just the fixes`
|
||||
- Return the COMPLETE file content, not just the fixes`,
|
||||
{ jsonMode: true, priority: 'high', category: 'repair-code' }
|
||||
)
|
||||
|
||||
const response = await window.spark.llm(promptText, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return {
|
||||
success: true,
|
||||
fixedCode: parsed.fixedCode,
|
||||
explanation: parsed.explanation,
|
||||
remainingIssues: parsed.remainingIssues || [],
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
fixedCode: parsed.fixedCode,
|
||||
explanation: parsed.explanation,
|
||||
remainingIssues: parsed.remainingIssues || [],
|
||||
success: false,
|
||||
explanation: 'Failed to repair code automatically',
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Auto-repair failed:', error)
|
||||
@@ -266,7 +276,8 @@ Rules:
|
||||
.map(f => `${f.path}:\n\`\`\`${f.language}\n${f.content.slice(0, 500)}...\n\`\`\``)
|
||||
.join('\n\n')
|
||||
|
||||
const promptText = `You are a code repair assistant. Fix the following errors in this code, considering the context of related files:
|
||||
const result = await ProtectedLLMService.safeLLMCall(
|
||||
window.spark.llmPrompt`You are a code repair assistant. Fix the following errors in this code, considering the context of related files:
|
||||
|
||||
File: ${file.name} (${file.language})
|
||||
|
||||
@@ -294,16 +305,23 @@ Rules:
|
||||
- Use consistent naming and patterns from related files
|
||||
- Replace "any" types with appropriate types from context
|
||||
- Maintain code functionality and structure
|
||||
- Return the COMPLETE file content, not just the fixes`
|
||||
- Return the COMPLETE file content, not just the fixes`,
|
||||
{ jsonMode: true, priority: 'high', category: 'repair-with-context' }
|
||||
)
|
||||
|
||||
const response = await window.spark.llm(promptText, 'gpt-4o', true)
|
||||
const parsed = JSON.parse(response)
|
||||
if (result) {
|
||||
const parsed = JSON.parse(result)
|
||||
return {
|
||||
success: true,
|
||||
fixedCode: parsed.fixedCode,
|
||||
explanation: parsed.explanation,
|
||||
remainingIssues: parsed.remainingIssues || [],
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
fixedCode: parsed.fixedCode,
|
||||
explanation: parsed.explanation,
|
||||
remainingIssues: parsed.remainingIssues || [],
|
||||
success: false,
|
||||
explanation: 'Failed to repair code automatically',
|
||||
}
|
||||
} catch (error) {
|
||||
console.error('Auto-repair with context failed:', error)
|
||||
|
||||
@@ -0,0 +1,81 @@
|
||||
import { aiRateLimiter, scanRateLimiter } from './rate-limiter'
|
||||
import { toast } from 'sonner'
|
||||
|
||||
interface LLMCallOptions {
|
||||
model?: 'gpt-4o' | 'gpt-4o-mini'
|
||||
jsonMode?: boolean
|
||||
priority?: 'low' | 'medium' | 'high'
|
||||
category?: string
|
||||
}
|
||||
|
||||
export class ProtectedLLMService {
|
||||
private static callCount = 0
|
||||
private static errorCount = 0
|
||||
private static readonly MAX_ERRORS_BEFORE_PAUSE = 5
|
||||
|
||||
static async safeLLMCall(
|
||||
prompt: string,
|
||||
options: LLMCallOptions = {}
|
||||
): Promise<string | null> {
|
||||
const {
|
||||
model = 'gpt-4o',
|
||||
jsonMode = false,
|
||||
priority = 'medium',
|
||||
category = 'general'
|
||||
} = options
|
||||
|
||||
if (this.errorCount >= this.MAX_ERRORS_BEFORE_PAUSE) {
|
||||
console.warn('Too many LLM errors detected. Pausing further calls.')
|
||||
toast.error('AI service temporarily unavailable due to repeated errors')
|
||||
return null
|
||||
}
|
||||
|
||||
try {
|
||||
const key = `llm-${category}`
|
||||
const result = await aiRateLimiter.throttle(
|
||||
key,
|
||||
async () => {
|
||||
this.callCount++
|
||||
return await window.spark.llm(prompt, model, jsonMode)
|
||||
},
|
||||
priority
|
||||
)
|
||||
|
||||
if (result) {
|
||||
this.errorCount = Math.max(0, this.errorCount - 1)
|
||||
}
|
||||
|
||||
return result
|
||||
} catch (error) {
|
||||
this.errorCount++
|
||||
console.error(`LLM call failed (${category}):`, error)
|
||||
|
||||
if (error instanceof Error) {
|
||||
if (error.message.includes('502') || error.message.includes('Bad Gateway')) {
|
||||
toast.error('Service temporarily unavailable - please wait a moment')
|
||||
} else if (error.message.includes('429') || error.message.includes('rate limit')) {
|
||||
toast.error('Too many requests - please slow down')
|
||||
} else {
|
||||
toast.error('AI service error - please try again')
|
||||
}
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
}
|
||||
|
||||
static getStats() {
|
||||
return {
|
||||
totalCalls: this.callCount,
|
||||
errorCount: this.errorCount,
|
||||
isPaused: this.errorCount >= this.MAX_ERRORS_BEFORE_PAUSE
|
||||
}
|
||||
}
|
||||
|
||||
static reset() {
|
||||
this.callCount = 0
|
||||
this.errorCount = 0
|
||||
aiRateLimiter.reset()
|
||||
scanRateLimiter.reset()
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,122 @@
|
||||
interface RateLimitConfig {
|
||||
maxRequests: number
|
||||
windowMs: number
|
||||
retryDelay: number
|
||||
}
|
||||
|
||||
interface RequestRecord {
|
||||
timestamp: number
|
||||
count: number
|
||||
}
|
||||
|
||||
class RateLimiter {
|
||||
private requests: Map<string, RequestRecord> = new Map()
|
||||
private config: RateLimitConfig
|
||||
|
||||
constructor(config: RateLimitConfig = {
|
||||
maxRequests: 5,
|
||||
windowMs: 60000,
|
||||
retryDelay: 2000
|
||||
}) {
|
||||
this.config = config
|
||||
}
|
||||
|
||||
async throttle<T>(
|
||||
key: string,
|
||||
fn: () => Promise<T>,
|
||||
priority: 'low' | 'medium' | 'high' = 'medium'
|
||||
): Promise<T | null> {
|
||||
const now = Date.now()
|
||||
const record = this.requests.get(key)
|
||||
|
||||
if (record) {
|
||||
const timeElapsed = now - record.timestamp
|
||||
|
||||
if (timeElapsed < this.config.windowMs) {
|
||||
if (record.count >= this.config.maxRequests) {
|
||||
console.warn(`Rate limit exceeded for ${key}. Try again in ${Math.ceil((this.config.windowMs - timeElapsed) / 1000)}s`)
|
||||
|
||||
if (priority === 'high') {
|
||||
await new Promise(resolve => setTimeout(resolve, this.config.retryDelay))
|
||||
return this.throttle(key, fn, priority)
|
||||
}
|
||||
|
||||
return null
|
||||
}
|
||||
|
||||
record.count++
|
||||
} else {
|
||||
this.requests.set(key, { timestamp: now, count: 1 })
|
||||
}
|
||||
} else {
|
||||
this.requests.set(key, { timestamp: now, count: 1 })
|
||||
}
|
||||
|
||||
this.cleanup()
|
||||
|
||||
try {
|
||||
return await fn()
|
||||
} catch (error) {
|
||||
if (error instanceof Error && (
|
||||
error.message.includes('502') ||
|
||||
error.message.includes('Bad Gateway') ||
|
||||
error.message.includes('429') ||
|
||||
error.message.includes('rate limit')
|
||||
)) {
|
||||
console.error(`Gateway error for ${key}:`, error.message)
|
||||
if (record) {
|
||||
record.count = this.config.maxRequests
|
||||
}
|
||||
}
|
||||
throw error
|
||||
}
|
||||
}
|
||||
|
||||
private cleanup() {
|
||||
const now = Date.now()
|
||||
for (const [key, record] of this.requests.entries()) {
|
||||
if (now - record.timestamp > this.config.windowMs * 2) {
|
||||
this.requests.delete(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reset(key?: string) {
|
||||
if (key) {
|
||||
this.requests.delete(key)
|
||||
} else {
|
||||
this.requests.clear()
|
||||
}
|
||||
}
|
||||
|
||||
getStatus(key: string): { remaining: number; resetIn: number } {
|
||||
const record = this.requests.get(key)
|
||||
if (!record) {
|
||||
return { remaining: this.config.maxRequests, resetIn: 0 }
|
||||
}
|
||||
|
||||
const now = Date.now()
|
||||
const timeElapsed = now - record.timestamp
|
||||
|
||||
if (timeElapsed >= this.config.windowMs) {
|
||||
return { remaining: this.config.maxRequests, resetIn: 0 }
|
||||
}
|
||||
|
||||
return {
|
||||
remaining: Math.max(0, this.config.maxRequests - record.count),
|
||||
resetIn: Math.ceil((this.config.windowMs - timeElapsed) / 1000)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
export const aiRateLimiter = new RateLimiter({
|
||||
maxRequests: 3,
|
||||
windowMs: 60000,
|
||||
retryDelay: 3000
|
||||
})
|
||||
|
||||
export const scanRateLimiter = new RateLimiter({
|
||||
maxRequests: 1,
|
||||
windowMs: 30000,
|
||||
retryDelay: 5000
|
||||
})
|
||||
Reference in New Issue
Block a user