feat: Implement LLM streaming support and enhance event handling in review process
parent 2f29ccff74
commit 1d953f554b
@@ -315,6 +315,17 @@ class ReviewerAgent:
             print(f"  ⚠️ SKIPPED: patch is empty or too small")
             continue
 
+        # Callback for LLM streaming
+        async def on_llm_chunk(chunk: str, file: str):
+            """Handle LLM streaming chunks"""
+            if self._stream_callback:
+                await self._stream_callback({
+                    "type": "llm_chunk",
+                    "chunk": chunk,
+                    "file_path": file,
+                    "message": chunk
+                })
+
         # Analyze diff with PR context
         pr_info = state.get("pr_info", {})
         comments = await self.analyzer.analyze_diff(
@@ -322,7 +333,8 @@ class ReviewerAgent:
             diff=patch,
             language=language,
             pr_title=pr_info.get("title", ""),
-            pr_description=pr_info.get("description", "")
+            pr_description=pr_info.get("description", ""),
+            on_llm_chunk=on_llm_chunk
         )
 
         print(f"  💬 Comments received: {len(comments)}")
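For context, a minimal sketch of the callback contract this hunk introduces: on_llm_chunk wraps whatever self._stream_callback is registered and forwards each chunk as an llm_chunk event. The demo below is hypothetical (stream_callback, demo, and the sample chunks are invented, not part of this commit):

    import asyncio

    async def demo():
        events = []

        # Hypothetical stand-in for the registered self._stream_callback
        async def stream_callback(event: dict):
            events.append(event)  # a real consumer would broadcast this

        # Mirrors the on_llm_chunk wrapper from the hunk above
        async def on_llm_chunk(chunk: str, file: str):
            await stream_callback({
                "type": "llm_chunk",
                "chunk": chunk,
                "file_path": file,
                "message": chunk,
            })

        for piece in ["Hel", "lo"]:
            await on_llm_chunk(piece, "main.py")
        assert "".join(e["chunk"] for e in events) == "Hello"

    asyncio.run(demo())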
@@ -99,7 +99,8 @@ class CodeAnalyzer:
         diff: str,
         language: Optional[str] = None,
         pr_title: str = "",
-        pr_description: str = ""
+        pr_description: str = "",
+        on_llm_chunk: Optional[callable] = None
     ) -> List[Dict[str, Any]]:
         """Analyze code diff and return comments"""
 
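An aside on the new parameter: the lowercase callable builtin type-checks as "anything callable", so the async contract is invisible to tooling. A tighter annotation is possible — a sketch assuming the callback signature is (chunk, file_path) -> awaitable, as the earlier hunk suggests; the alias name OnLLMChunk is invented here:

    from typing import Awaitable, Callable, Optional

    # Hypothetical alias; the commit itself keeps Optional[callable]
    OnLLMChunk = Callable[[str, str], Awaitable[None]]

    def analyze_diff_signature_sketch(on_llm_chunk: Optional[OnLLMChunk] = None) -> None:
        """Illustrative only; shows the tighter type, not the real method."""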
@@ -154,13 +155,32 @@ class CodeAnalyzer:
         try:
             print(f"\n⏳ Sending request to Ollama ({self.llm.model})...")
 
-            # Build a chain from the LLM and the JSON parser
-            chain = self.llm | self.json_parser
+            # Accumulate the full response from streaming chunks
+            full_response = ""
+            chunk_count = 0
 
-            # Get the result
-            result = await chain.ainvoke(prompt)
+            print(f"\n🤖 STREAMING AI response:")
+            print("-" * 80)
 
-            print(f"\n🤖 AI RESPONSE (parsed via JsonOutputParser):")
+            # Use streaming
+            async for chunk in self.llm.astream(prompt):
+                chunk_count += 1
+                full_response += chunk
+
+                # Forward the chunk through the callback
+                if on_llm_chunk:
+                    await on_llm_chunk(chunk, file_path)
+
+                # Echo to the console
+                print(chunk, end='', flush=True)
+
+            print("\n" + "-" * 80)
+            print(f"✅ Received {chunk_count} chunks, {len(full_response)} characters total")
+
+            # Parse the final result
+            result = self.json_parser.parse(full_response)
+
+            print(f"\n🤖 PARSED result:")
             print("-" * 80)
             print(json.dumps(result, ensure_ascii=False, indent=2)[:500] + "...")
             print("-" * 80)
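The hunk above swaps chain.ainvoke for a stream-then-parse pattern: chunks are surfaced immediately for UX, and JSON parsing happens once over the accumulated text. The pattern can be exercised without Ollama; a minimal sketch where fake_astream and its chunks are invented stand-ins for self.llm.astream(prompt):

    import asyncio
    import json

    # Hypothetical stand-in for self.llm.astream(prompt): yields string chunks
    async def fake_astream(prompt: str):
        for piece in ['{"comments": [{"line": 3, ', '"text": "nit"}]}']:
            yield piece

    async def main():
        full_response = ""
        async for chunk in fake_astream("review this diff"):
            full_response += chunk  # accumulate; a UI renders each chunk live
        result = json.loads(full_response)  # parse only the complete response
        print(result["comments"][0]["text"])  # -> nit

    asyncio.run(main())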
@@ -147,19 +147,20 @@ async def run_review_task(review_id: int, pr_number: int, repository_id: int, db
                 "data": event
             }
 
-            # Save to DB
-            from app.models.review_event import ReviewEvent
-            db_event = ReviewEvent(
-                review_id=review_id,
-                event_type=event.get("type", "agent_update"),
-                step=event.get("step"),
-                message=event.get("message"),
-                data=event
-            )
-            db.add(db_event)
-            await db.commit()
+            # Save to DB (do NOT persist llm_chunk events - there are too many)
+            if event.get("type") != "llm_chunk":
+                from app.models.review_event import ReviewEvent
+                db_event = ReviewEvent(
+                    review_id=review_id,
+                    event_type=event.get("type", "agent_update"),
+                    step=event.get("step"),
+                    message=event.get("message"),
+                    data=event
+                )
+                db.add(db_event)
+                await db.commit()
 
-            # Broadcast
+            # Broadcast (send all events, including llm_chunk)
             await manager.broadcast(event_data)
         except Exception as e:
             print(f"Error in review event handler: {e}")
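The filter logic — persist everything except llm_chunk, broadcast everything — is easy to sanity-check in isolation. A hedged sketch with invented test doubles (StubDB, StubManager, and handle are not app code):

    import asyncio

    class StubDB:
        """Hypothetical stand-in for the async DB session."""
        def __init__(self):
            self.saved = []
        def add(self, obj):
            self.saved.append(obj)
        async def commit(self):
            pass

    class StubManager:
        """Hypothetical stand-in for the WebSocket manager."""
        def __init__(self):
            self.sent = []
        async def broadcast(self, data):
            self.sent.append(data)

    async def handle(event, db, manager):
        # Mirrors the handler: persist all but llm_chunk, broadcast everything
        if event.get("type") != "llm_chunk":
            db.add(event)
            await db.commit()
        await manager.broadcast({"data": event})

    async def main():
        db, manager = StubDB(), StubManager()
        await handle({"type": "llm_chunk", "chunk": "x"}, db, manager)
        await handle({"type": "agent_step", "step": "analyze"}, db, manager)
        assert len(db.saved) == 1 and len(manager.sent) == 2

    asyncio.run(main())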
@@ -31,19 +31,20 @@ async def start_review_task(review_id: int, pr_number: int, repository_id: int):
                 "data": event
             }
 
-            # Save to DB
-            from app.models.review_event import ReviewEvent
-            db_event = ReviewEvent(
-                review_id=review_id,
-                event_type=event.get("type", "agent_update"),
-                step=event.get("step"),
-                message=event.get("message"),
-                data=event
-            )
-            db.add(db_event)
-            await db.commit()
+            # Save to DB (do NOT persist llm_chunk events - there are too many)
+            if event.get("type") != "llm_chunk":
+                from app.models.review_event import ReviewEvent
+                db_event = ReviewEvent(
+                    review_id=review_id,
+                    event_type=event.get("type", "agent_update"),
+                    step=event.get("step"),
+                    message=event.get("message"),
+                    data=event
+                )
+                db.add(db_event)
+                await db.commit()
 
-            # Broadcast
+            # Broadcast (send all events, including llm_chunk)
             await manager.broadcast(event_data)
         except Exception as e:
             print(f"Error in webhook review event handler: {e}")
@@ -227,21 +227,22 @@ class ReviewTaskWorker:
             print(f"   Prepared event_data: {event_data}")
             logger.info(f"  🔔 Broadcasting event: type={event.get('type')}, connections={len(manager.active_connections)}")
 
-            # Save event to database
-            from app.models.review_event import ReviewEvent
-            db_event = ReviewEvent(
-                review_id=review.id,
-                event_type=event.get("type", "agent_update"),
-                step=event.get("step"),
-                message=event.get("message"),
-                data=event
-            )
-            db.add(db_event)
-            await db.commit()
-            print(f"  ✓ Event saved to DB: {db_event.id}")
-            logger.debug(f"  💾 Event saved to DB: {db_event.id}")
+            # Save event to database (do NOT persist llm_chunk events - there are too many)
+            if event.get("type") != "llm_chunk":
+                from app.models.review_event import ReviewEvent
+                db_event = ReviewEvent(
+                    review_id=review.id,
+                    event_type=event.get("type", "agent_update"),
+                    step=event.get("step"),
+                    message=event.get("message"),
+                    data=event
+                )
+                db.add(db_event)
+                await db.commit()
+                print(f"  ✓ Event saved to DB: {db_event.id}")
+                logger.debug(f"  💾 Event saved to DB: {db_event.id}")
 
-            # Broadcast to all connected clients
+            # Broadcast to all connected clients (send everything, including llm_chunk)
             print(f"   Broadcasting to {len(manager.active_connections)} connections...")
             await manager.broadcast(event_data)
             print(f"  ✓ Broadcast completed")
@@ -250,6 +251,9 @@ class ReviewTaskWorker:
             if event.get("type") == "agent_step":
                 step = event.get("step", "unknown")
                 logger.info(f"  📍 Step: {step}")
+            elif event.get("type") == "llm_chunk":
+                # Don't log every chunk - there are too many
+                pass
             elif event.get("type") == "llm_message":
                 message = event.get("message", "")[:100]
                 logger.info(f"  💬 LLM: {message}...")
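If some visibility into chunk traffic is still wanted, sampling is one alternative to silencing it entirely — a hedged sketch; ChunkLogSampler and the interval value are invented for illustration, not part of this commit:

    import logging

    logger = logging.getLogger(__name__)

    class ChunkLogSampler:
        """Hypothetical helper: log only every Nth llm_chunk event."""
        def __init__(self, interval: int = 50):  # 50 is an illustrative value
            self.interval = interval
            self.seen = 0

        def observe(self, event: dict) -> None:
            if event.get("type") != "llm_chunk":
                return
            self.seen += 1
            if self.seen % self.interval == 0:
                logger.debug(f"  🧩 {self.seen} chunks streamed so far")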
@@ -32,6 +32,7 @@ export const ReviewStream: React.FC<ReviewStreamProps> = ({ reviewId }) => {
   const [currentStep, setCurrentStep] = useState<string>('');
   const [isConnected, setIsConnected] = useState(false);
   const [llmMessages, setLlmMessages] = useState<string[]>([]);
+  const [llmStreamingText, setLlmStreamingText] = useState<string>('');
 
   useEffect(() => {
     console.log('🔌 Connecting to WebSocket:', WS_URL);
@@ -126,6 +127,12 @@ export const ReviewStream: React.FC<ReviewStreamProps> = ({ reviewId }) => {
         setCurrentStep(step || '');
       }
 
+      // Handle LLM streaming chunks
+      if (data.type === 'llm_chunk' || data.data?.type === 'llm_chunk') {
+        const chunk = data.data?.chunk || data.chunk || '';
+        setLlmStreamingText((prev) => prev + chunk);
+      }
+
       // Collect LLM messages
       if (data.type === 'llm_message' || data.data?.type === 'llm_message') {
         const message = data.data?.message || data.message;
@@ -136,6 +143,8 @@ export const ReviewStream: React.FC<ReviewStreamProps> = ({ reviewId }) => {
       // Handle special events
       if (data.type === 'review_started') {
         console.log('🎬 Review started:', data.data?.message);
+        // Reset the streaming text when a new review starts
+        setLlmStreamingText('');
       }
 
       if (data.type === 'review_completed') {
@@ -319,6 +328,21 @@ export const ReviewStream: React.FC<ReviewStreamProps> = ({ reviewId }) => {
 
       {renderStepProgress()}
 
+      {/* LLM streaming text */}
+      {llmStreamingText && (
+        <div className="mt-6">
+          <h3 className="text-lg font-semibold text-dark-text-primary mb-3 flex items-center gap-2">
+            <span className="animate-pulse">🤖</span> AI is generating a response...
+          </h3>
+          <div className="bg-dark-card border border-dark-border rounded-lg p-4">
+            <pre className="text-sm text-dark-text-secondary font-mono whitespace-pre-wrap break-words max-h-96 overflow-y-auto">
+              {llmStreamingText}
+              <span className="animate-pulse">▋</span>
+            </pre>
+          </div>
+        </div>
+      )}
+
       <div className="mt-6">
         <h3 className="text-lg font-semibold text-dark-text-primary mb-3">
           📝 Events