feat: Implement LLM streaming support and enhance event handling in review process

Author: Primakov Alexandr Alexandrovich
Date: 2025-10-13 17:48:03 +03:00
Parent: 2f29ccff74
Commit: 1d953f554b
6 changed files with 107 additions and 45 deletions


@@ -315,6 +315,17 @@ class ReviewerAgent:
                 print(f" ⚠️ SKIPPED: patch is empty or too small")
                 continue

+            # Callback for LLM streaming
+            async def on_llm_chunk(chunk: str, file: str):
+                """Handle LLM streaming chunks"""
+                if self._stream_callback:
+                    await self._stream_callback({
+                        "type": "llm_chunk",
+                        "chunk": chunk,
+                        "file_path": file,
+                        "message": chunk
+                    })
+
             # Analyze diff with PR context
             pr_info = state.get("pr_info", {})
             comments = await self.analyzer.analyze_diff(
@@ -322,7 +333,8 @@ class ReviewerAgent:
                 diff=patch,
                 language=language,
                 pr_title=pr_info.get("title", ""),
-                pr_description=pr_info.get("description", "")
+                pr_description=pr_info.get("description", ""),
+                on_llm_chunk=on_llm_chunk
             )
             print(f" 💬 Comments received: {len(comments)}")

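Note on the first hunk above: the events pushed through self._stream_callback carry the shape built in on_llm_chunk ("type": "llm_chunk" plus the chunk, file path, and message). How that callback gets registered is not part of this commit, so the sketch below is only an assumed consumer: a queue-based relay that a web layer (SSE/WebSocket) could drain; stream_callback, the wiring comment, and the simulated event are hypothetical.

import asyncio
from typing import Any, Dict

async def main() -> None:
    # Hypothetical consumer: collect "llm_chunk" events in a queue so a web
    # layer (SSE/WebSocket) can relay them to the browser as they arrive.
    queue: "asyncio.Queue[Dict[str, Any]]" = asyncio.Queue()

    async def stream_callback(event: Dict[str, Any]) -> None:
        if event.get("type") == "llm_chunk":
            await queue.put(event)

    # Assumed wiring; the real registration point is outside this commit:
    # agent = ReviewerAgent(...)
    # agent._stream_callback = stream_callback

    # Simulate one event with the shape produced by on_llm_chunk above.
    await stream_callback({
        "type": "llm_chunk",
        "chunk": "def foo():",
        "file_path": "app/example.py",
        "message": "def foo():",
    })
    print(await queue.get())

asyncio.run(main())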

@@ -99,7 +99,8 @@ class CodeAnalyzer:
         diff: str,
         language: Optional[str] = None,
         pr_title: str = "",
-        pr_description: str = ""
+        pr_description: str = "",
+        on_llm_chunk: Optional[callable] = None
     ) -> List[Dict[str, Any]]:
         """Analyze code diff and return comments"""
@@ -154,13 +155,32 @@ class CodeAnalyzer:
         try:
             print(f"\n⏳ Sending request to Ollama ({self.llm.model})...")

-            # Create a chain of the LLM and the JSON parser
-            chain = self.llm | self.json_parser
+            # Accumulate the full response from streaming chunks
+            full_response = ""
+            chunk_count = 0

-            # Get the result
-            result = await chain.ainvoke(prompt)
+            print(f"\n🤖 STREAMING AI response:")
+            print("-" * 80)

-            print(f"\n🤖 AI RESPONSE (parsed via JsonOutputParser):")
+            # Use streaming
+            async for chunk in self.llm.astream(prompt):
+                chunk_count += 1
+                full_response += chunk
+
+                # Send the chunk through the callback
+                if on_llm_chunk:
+                    await on_llm_chunk(chunk, file_path)
+
+                # Show it in the console
+                print(chunk, end='', flush=True)
+
+            print("\n" + "-" * 80)
+            print(f"✅ Received {chunk_count} chunks, {len(full_response)} characters total")
+
+            # Parse the final result
+            result = self.json_parser.parse(full_response)
+
+            print(f"\n🤖 PARSED result:")
             print("-" * 80)
             print(json.dumps(result, ensure_ascii=False, indent=2)[:500] + "...")
             print("-" * 80)