feat: Add review events persistence, version display, and auto-versioning system

Primakov Alexandr Alexandrovich 2025-10-13 14:18:37 +03:00
parent cfba28f913
commit 2db1225618
56 changed files with 750 additions and 436 deletions

.git-hooks/README.md Normal file

@ -0,0 +1,66 @@
# Git Hooks
This folder contains custom git hooks used to automate routine tasks.
## Installation
To use these hooks, run:
```bash
# From the project root
git config core.hooksPath .git-hooks
# Make the hooks executable
chmod +x .git-hooks/pre-commit
```
## Hooks
### pre-commit
Automatically bumps the backend version whenever files under the `backend/` directory change.
**Version bump rules:**
- `feat:` or `feature:` - bumps the MINOR version (0.1.0 → 0.2.0)
- `fix:` or `bugfix:` - bumps the PATCH version (0.1.0 → 0.1.1)
- `BREAKING:` or `major:` - bumps the MAJOR version (0.1.0 → 1.0.0)
- Anything else - bumps the PATCH version
**Commit examples:**
```bash
git commit -m "feat: Add new feature" # 0.1.0 → 0.2.0
git commit -m "fix: Fix bug" # 0.1.0 → 0.1.1
git commit -m "BREAKING: Major changes" # 0.1.0 → 1.0.0
```
## Manual version bump
You can also bump the version by hand:
```bash
# Patch version (0.1.0 → 0.1.1)
bash bump_version.sh patch
# Minor version (0.1.0 → 0.2.0)
bash bump_version.sh minor
# Major version (0.1.0 → 1.0.0)
bash bump_version.sh major
```
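Note that `bump_version.sh` is added with no visible content in this diff (0 lines), so its implementation is not shown here. Purely as a sketch of the bump logic described above, the equivalent in Python could look like this; the `backend/VERSION` path matches this commit, everything else is an assumption:
```python
# Hypothetical sketch of the semver bump logic described above; the real project
# uses bump_version.sh, whose contents are not included in this diff.
import sys
from pathlib import Path

VERSION_FILE = Path("backend/VERSION")  # assumes the script runs from the repo root

def bump(part: str) -> str:
    major, minor, patch = (int(x) for x in VERSION_FILE.read_text().strip().split("."))
    if part == "major":
        major, minor, patch = major + 1, 0, 0
    elif part == "minor":
        minor, patch = minor + 1, 0
    else:  # default: patch
        patch += 1
    new_version = f"{major}.{minor}.{patch}"
    VERSION_FILE.write_text(new_version + "\n")
    return new_version

if __name__ == "__main__":
    print(bump(sys.argv[1] if len(sys.argv) > 1 else "patch"))
```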
## Disabling the hooks
To skip the hooks for a single commit:
```bash
git commit --no-verify -m "Your message"
```
Or disable them entirely:
```bash
git config core.hooksPath .git/hooks
```

.git-hooks/pre-commit Normal file

@ -0,0 +1,24 @@
#!/bin/bash
# Pre-commit hook that automatically bumps the version
echo "🔄 Checking backend version..."

# Check whether there are staged changes under backend/
if git diff --cached --name-only | grep -q '^backend/'; then
    echo "📝 Backend changes detected, updating version..."

    # Run the version bump script
    bash bump_version.sh

    # Check whether the version file was modified
    if git diff --name-only | grep -q '^backend/VERSION'; then
        echo "✅ Version updated, adding it to the commit"
        git add backend/VERSION
    fi
else
    echo "  No backend changes, version left unchanged"
fi

exit 0


@ -1,73 +0,0 @@
./ARCHITECTURE.md
./backend/app/__init__.py
./backend/app/agents/__init__.py
./backend/app/agents/prompts.py
./backend/app/agents/reviewer.py
./backend/app/agents/tools.py
./backend/app/api/__init__.py
./backend/app/api/repositories.py
./backend/app/api/reviews.py
./backend/app/api/webhooks.py
./backend/app/config.py
./backend/app/database.py
./backend/app/main.py
./backend/app/models/__init__.py
./backend/app/models/comment.py
./backend/app/models/pull_request.py
./backend/app/models/repository.py
./backend/app/models/review.py
./backend/app/schemas/__init__.py
./backend/app/schemas/repository.py
./backend/app/schemas/review.py
./backend/app/schemas/webhook.py
./backend/app/services/__init__.py
./backend/app/services/base.py
./backend/app/services/bitbucket.py
./backend/app/services/gitea.py
./backend/app/services/github.py
./backend/app/utils.py
./backend/app/webhooks/__init__.py
./backend/app/webhooks/bitbucket.py
./backend/app/webhooks/gitea.py
./backend/app/webhooks/github.py
./backend/README.md
./backend/requirements.txt
./backend/start.bat
./backend/start.sh
./cloud.md
./COMMANDS.md
./CONTRIBUTING.md
./FILES_LIST.txt
./frontend/index.html
./frontend/package.json
./frontend/postcss.config.js
./frontend/README.md
./frontend/src/api/client.ts
./frontend/src/api/websocket.ts
./frontend/src/App.tsx
./frontend/src/components/CommentsList.tsx
./frontend/src/components/RepositoryForm.tsx
./frontend/src/components/RepositoryList.tsx
./frontend/src/components/ReviewList.tsx
./frontend/src/components/ReviewProgress.tsx
./frontend/src/components/WebSocketStatus.tsx
./frontend/src/index.css
./frontend/src/main.tsx
./frontend/src/pages/Dashboard.tsx
./frontend/src/pages/Repositories.tsx
./frontend/src/pages/ReviewDetail.tsx
./frontend/src/pages/Reviews.tsx
./frontend/src/types/index.ts
./frontend/src/vite-env.d.ts
./frontend/start.bat
./frontend/start.sh
./frontend/tailwind.config.js
./frontend/tsconfig.json
./frontend/tsconfig.node.json
./frontend/vite.config.ts
./LICENSE
./PROJECT_STATUS.md
./PROJECT_STRUCTURE.txt
./QUICKSTART.md
./README.md
./SUMMARY.md


@ -1,33 +0,0 @@
./.gitignore
./ARCHITECTURE.md
./backend/app/config.py
./backend/app/database.py
./backend/app/main.py
./backend/app/utils.py
./backend/app/__init__.py
./backend/README.md
./backend/requirements.txt
./backend/start.bat
./backend/start.sh
./cloud.md
./COMMANDS.md
./CONTRIBUTING.md
./frontend/.eslintrc.cjs
./frontend/index.html
./frontend/package.json
./frontend/postcss.config.js
./frontend/README.md
./frontend/src/App.tsx
./frontend/src/index.css
./frontend/src/main.tsx
./frontend/src/vite-env.d.ts
./frontend/start.bat
./frontend/start.sh
./frontend/tailwind.config.js
./frontend/tsconfig.json
./frontend/tsconfig.node.json
./frontend/vite.config.ts
./LICENSE
./PROJECT_STRUCTURE.txt
./QUICKSTART.md
./README.md

backend/VERSION Normal file

@ -0,0 +1 @@
0.1.0


@ -552,7 +552,7 @@ class ReviewerAgent:
        try:
            async for event in self.graph.astream(
                initial_state,
-               stream_mode=["updates"]
+               stream_mode=["updates", "messages"]
            ):
                event_count += 1
                print(f"📨 Event #{event_count} received from graph")
@ -581,6 +581,28 @@
                        if isinstance(node_data, dict):
                            final_state = node_data

+               # Handle 'messages' events (LLM streaming)
+               elif event_type == 'messages':
+                   print(f"   💬 LLM messages received")
+                   # event_data is a list of messages
+                   if isinstance(event_data, (list, tuple)):
+                       for msg in event_data:
+                           # Check if it's an AIMessage or similar
+                           msg_content = None
+                           if hasattr(msg, 'content'):
+                               msg_content = msg.content
+                           elif isinstance(msg, dict) and 'content' in msg:
+                               msg_content = msg['content']
+                           else:
+                               msg_content = str(msg)
+
+                           if msg_content and on_event:
+                               print(f"   💬 Sending LLM message: {msg_content[:100]}...")
+                               await on_event({
+                                   "type": "llm_message",
+                                   "message": msg_content
+                               })
+
                # Handle 'values' events (state snapshots)
                elif event_type == 'values':
                    print(f"   📊 State snapshot received")


@ -6,9 +6,11 @@ from sqlalchemy import select, func
 from sqlalchemy.orm import joinedload
 from app.database import get_db
-from app.models import Review, Comment, PullRequest
+from app.models import Review, Comment, PullRequest, ReviewEvent
 from app.schemas.review import ReviewResponse, ReviewList, ReviewStats, PullRequestInfo, CommentResponse
+from app.schemas.review_event import ReviewEvent as ReviewEventSchema
 from app.agents import ReviewerAgent
+from typing import List

 router = APIRouter()
@ -216,3 +218,27 @@ async def get_review_stats(db: AsyncSession = Depends(get_db)):
         avg_comments_per_review=round(avg_comments, 2)
     )

+
+@router.get("/{review_id}/events", response_model=List[ReviewEventSchema])
+async def get_review_events(
+    review_id: int,
+    db: AsyncSession = Depends(get_db)
+):
+    """Get all events for a specific review"""
+    # Check if review exists
+    result = await db.execute(select(Review).where(Review.id == review_id))
+    review = result.scalar_one_or_none()
+    if not review:
+        raise HTTPException(status_code=404, detail="Review not found")
+
+    # Get events
+    events_result = await db.execute(
+        select(ReviewEvent)
+        .where(ReviewEvent.review_id == review_id)
+        .order_by(ReviewEvent.created_at)
+    )
+    events = events_result.scalars().all()
+
+    return events


@ -118,6 +118,20 @@ async def health_check():
     return {"status": "healthy"}

+
+@app.get("/version")
+async def get_version():
+    """Get backend version"""
+    try:
+        version_file = Path(__file__).parent.parent / "VERSION"
+        if version_file.exists():
+            version = version_file.read_text().strip()
+        else:
+            version = "unknown"
+        return {"version": version}
+    except Exception:
+        return {"version": "unknown"}
+
 @app.websocket("/ws/reviews")
 async def websocket_endpoint(websocket: WebSocket):
     """WebSocket endpoint for real-time review updates"""


@ -6,6 +6,7 @@ from app.models.review import Review
 from app.models.comment import Comment
 from app.models.organization import Organization
 from app.models.review_task import ReviewTask
+from app.models.review_event import ReviewEvent

-__all__ = ["Repository", "PullRequest", "Review", "Comment", "Organization", "ReviewTask"]
+__all__ = ["Repository", "PullRequest", "Review", "Comment", "Organization", "ReviewTask", "ReviewEvent"]


@ -37,6 +37,7 @@ class Review(Base):
     # Relationships
     pull_request = relationship("PullRequest", back_populates="reviews")
     comments = relationship("Comment", back_populates="review", cascade="all, delete-orphan")
+    events = relationship("ReviewEvent", back_populates="review", cascade="all, delete-orphan", order_by="ReviewEvent.created_at")

     def __repr__(self):
         return f"<Review(id={self.id}, status={self.status}, pr_id={self.pull_request_id})>"


@ -0,0 +1,27 @@
"""Review Event model - хранение событий процесса review"""
from sqlalchemy import Column, Integer, String, Text, DateTime, ForeignKey, JSON
from sqlalchemy.orm import relationship
from datetime import datetime
from app.database import Base
class ReviewEvent(Base):
"""Событие процесса review"""
__tablename__ = "review_events"
id = Column(Integer, primary_key=True, index=True)
review_id = Column(Integer, ForeignKey("reviews.id", ondelete="CASCADE"), nullable=False, index=True)
event_type = Column(String(50), nullable=False) # agent_step, llm_message, review_started, etc.
step = Column(String(100), nullable=True) # fetch_pr_info, analyze_files, etc.
message = Column(Text, nullable=True)
data = Column(JSON, nullable=True) # Дополнительные данные события
created_at = Column(DateTime, default=datetime.utcnow, nullable=False, index=True)
# Relationships
review = relationship("Review", back_populates="events")
def __repr__(self):
return f"<ReviewEvent(id={self.id}, review_id={self.review_id}, type={self.event_type})>"


@ -23,6 +23,10 @@ from app.schemas.streaming import (
     ReviewProgressEvent,
     StreamEventType
 )
+from app.schemas.review_event import (
+    ReviewEvent as ReviewEventSchema,
+    ReviewEventCreate
+)

 __all__ = [
     "RepositoryCreate",
@ -40,5 +44,7 @@ __all__ = [
     "LLMStreamEvent",
     "ReviewProgressEvent",
     "StreamEventType",
+    "ReviewEventSchema",
+    "ReviewEventCreate",
 ]


@ -0,0 +1,29 @@
"""Review Event schemas"""
from pydantic import BaseModel, Field
from datetime import datetime
from typing import Optional, Dict, Any
class ReviewEventBase(BaseModel):
"""Base review event schema"""
event_type: str = Field(..., description="Тип события")
step: Optional[str] = Field(None, description="Шаг процесса")
message: Optional[str] = Field(None, description="Сообщение")
data: Optional[Dict[str, Any]] = Field(None, description="Дополнительные данные")
class ReviewEventCreate(ReviewEventBase):
"""Schema for creating review event"""
review_id: int
class ReviewEvent(ReviewEventBase):
"""Review event response schema"""
id: int
review_id: int
created_at: datetime
class Config:
from_attributes = True


@ -200,6 +200,19 @@ class ReviewTaskWorker:
             logger.info(f"   🔔 Broadcasting event: type={event.get('type')}, connections={len(manager.active_connections)}")

+            # Save event to database
+            from app.models.review_event import ReviewEvent
+            db_event = ReviewEvent(
+                review_id=review.id,
+                event_type=event.get("type", "agent_update"),
+                step=event.get("step"),
+                message=event.get("message"),
+                data=event
+            )
+            db.add(db_event)
+            await db.commit()
+            logger.debug(f"   💾 Event saved to DB: {db_event.id}")
+
             # Broadcast to all connected clients
             await manager.broadcast(event_data)


@ -4,7 +4,7 @@
 import asyncio
 from app.database import engine, Base
-from app.models import Organization, ReviewTask, Repository, PullRequest, Review, Comment
+from app.models import Organization, ReviewTask, Repository, PullRequest, Review, Comment, ReviewEvent


 async def create_tables():
@ -20,6 +20,7 @@ async def create_tables():
     print("   - pull_requests")
     print("   - reviews")
     print("   - comments")
+    print("   - review_events")


 if __name__ == "__main__":


@ -0,0 +1,17 @@
-- Migration: Add review_events table
CREATE TABLE IF NOT EXISTS review_events (
id INTEGER PRIMARY KEY AUTOINCREMENT,
review_id INTEGER NOT NULL,
event_type VARCHAR(50) NOT NULL,
step VARCHAR(100),
message TEXT,
data JSON,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
FOREIGN KEY (review_id) REFERENCES reviews(id) ON DELETE CASCADE
);
-- Create indexes for better performance
CREATE INDEX IF NOT EXISTS idx_review_events_review_id ON review_events(review_id);
CREATE INDEX IF NOT EXISTS idx_review_events_created_at ON review_events(created_at);
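If this SQL needs to be applied to an already existing SQLite database (rather than recreating the schema via `migrate.py`), a minimal helper could look like the sketch below; the migration file name is assumed, while `backend/review.db` matches the paths used by the scripts elsewhere in this repo:
```python
# Hypothetical helper for applying the migration above to an existing SQLite DB.
# The migration file name is an assumption; it is not given in this commit.
import sqlite3
from pathlib import Path

DB_PATH = Path("backend/review.db")
MIGRATION_PATH = Path("backend/migrations/add_review_events.sql")  # assumed name

def apply_migration() -> None:
    sql = MIGRATION_PATH.read_text(encoding="utf-8")
    with sqlite3.connect(DB_PATH) as conn:
        conn.executescript(sql)  # runs the CREATE TABLE / CREATE INDEX statements

if __name__ == "__main__":
    apply_migration()
```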

bump_version.sh Normal file


@ -1,34 +0,0 @@
#!/bin/bash
echo "=========================================="
echo "AI Review Service - Status Check"
echo "=========================================="
echo ""
echo "1. Service Status:"
systemctl status ai-review.service --no-pager
echo ""
echo "=========================================="
echo "2. Last 100 lines of logs:"
echo "=========================================="
journalctl -u ai-review.service -n 100 --no-pager
echo ""
echo "=========================================="
echo "3. Checking files:"
echo "=========================================="
echo "Backend exists: $([ -d /home/user/code-review-agent/backend ] && echo 'YES' || echo 'NO')"
echo "Frontend exists: $([ -d /home/user/code-review-agent/frontend ] && echo 'YES' || echo 'NO')"
echo "Public dir exists: $([ -d /home/user/code-review-agent/backend/public ] && echo 'YES' || echo 'NO')"
echo "venv exists: $([ -d /home/user/code-review-agent/backend/venv ] && echo 'YES' || echo 'NO')"
echo ".env exists: $([ -f /home/user/code-review-agent/backend/.env ] && echo 'YES' || echo 'NO')"
echo "DB exists: $([ -f /home/user/code-review-agent/backend/review.db ] && echo 'YES' || echo 'NO')"
echo ""
echo "=========================================="
echo "4. Manual start test:"
echo "=========================================="
echo "Run this command to see actual error:"
echo "cd /home/user/code-review-agent/backend && source venv/bin/activate && python -m uvicorn app.main:app --host 0.0.0.0 --port 8000"


@ -1,108 +0,0 @@
#!/bin/bash

echo "=========================================="
echo "AI Review - Diagnostics and repair"
echo "=========================================="
echo ""

# Determine the install directory (where this script lives)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
INSTALL_DIR="$SCRIPT_DIR"

echo "Working directory: $INSTALL_DIR"
echo ""

echo "1. Checking files:"
echo "   - Backend: $([ -d $INSTALL_DIR/backend ] && echo '✓' || echo '✗')"
echo "   - Frontend: $([ -d $INSTALL_DIR/frontend ] && echo '✓' || echo '✗')"
echo "   - venv: $([ -d $INSTALL_DIR/backend/venv ] && echo '✓' || echo '✗ MISSING')"
echo "   - venv/bin/python: $([ -f $INSTALL_DIR/backend/venv/bin/python ] && echo '✓' || echo '✗ MISSING')"
echo "   - venv/bin/python3: $([ -f $INSTALL_DIR/backend/venv/bin/python3 ] && echo '✓' || echo '✗ MISSING')"
echo "   - public: $([ -d $INSTALL_DIR/backend/public ] && echo '✓' || echo '✗ MISSING')"
echo "   - DB: $([ -f $INSTALL_DIR/backend/review.db ] && echo '✓' || echo '⚠️ will be created')"
echo ""

# Show what is actually inside the venv
if [ -d "$INSTALL_DIR/backend/venv" ]; then
    echo "2. Contents of venv/bin/:"
    ls -la "$INSTALL_DIR/backend/venv/bin/" | head -20
    echo ""
fi

echo "=========================================="
echo "Repair"
echo "=========================================="
echo ""

cd "$INSTALL_DIR/backend"

# Remove the old venv if present
if [ -d "venv" ]; then
    echo "Removing old venv..."
    rm -rf venv
fi

# Create a fresh venv
echo "Creating new venv..."
python3 -m venv venv

# Verify it was created
if [ ! -f "venv/bin/python" ] && [ ! -f "venv/bin/python3" ]; then
    echo "✗ ERROR: venv was not created properly!"
    echo ""
    echo "Try:"
    echo "  sudo apt-get install python3-venv"
    echo "  python3 -m venv venv"
    exit 1
fi

echo "✓ venv created"

# Activate and install dependencies
echo "Installing dependencies..."
source venv/bin/activate
pip install --upgrade pip > /dev/null
pip install -r requirements.txt

echo "✓ Dependencies installed"

# Apply migrations
if [ -f "migrate.py" ]; then
    echo "Applying migrations..."
    python migrate.py
    echo "✓ Migrations applied"
fi

echo ""
echo "=========================================="
echo "Verification"
echo "=========================================="
echo ""

echo "Trying to start (5 seconds)..."
timeout 5 python -m uvicorn app.main:app --host 0.0.0.0 --port 8000 2>&1 | head -20 &
UVICORN_PID=$!
sleep 6

if ps -p $UVICORN_PID > /dev/null 2>&1; then
    echo "✓ Uvicorn started successfully"
    kill $UVICORN_PID 2>/dev/null
else
    echo "⚠️ Uvicorn stopped (that is expected for this test)"
fi

echo ""
echo "=========================================="
echo "Done!"
echo "=========================================="
echo ""
echo "Now restart the service:"
echo "  sudo systemctl restart ai-review"
echo "  sudo systemctl status ai-review"
echo ""
echo "Or run it manually for a test:"
echo "  cd $INSTALL_DIR/backend"
echo "  source venv/bin/activate"
echo "  python -m uvicorn app.main:app --host 0.0.0.0 --port 8000"

docs/README.md Normal file

@ -0,0 +1,53 @@
# AI Code Review Agent documentation
All project documentation lives here.
## Quick start
- [QUICKSTART.md](QUICKSTART.md) - Project quick start
- [START_PROJECT.md](START_PROJECT.md) - Detailed startup instructions
## Deployment
- [DEPLOYMENT.md](DEPLOYMENT.md) - General deployment information
- [UBUNTU_DEPLOYMENT.md](UBUNTU_DEPLOYMENT.md) - Deployment on Ubuntu/Debian
- [REDEPLOY_GUIDE.md](REDEPLOY_GUIDE.md) - Upgrade guide
- [REDEPLOY_UBUNTU_QUICK.md](REDEPLOY_UBUNTU_QUICK.md) - Quick upgrade on Ubuntu
- [cloud.md](cloud.md) - Cloud deployment
## Features
- [FEATURES_UPDATE.md](FEATURES_UPDATE.md) - Feature updates
- [REVIEW_FEATURES.md](REVIEW_FEATURES.md) - Review capabilities
- [ORGANIZATION_FEATURE.md](ORGANIZATION_FEATURE.md) - Working with organizations
- [ORGANIZATION_QUICKSTART.md](ORGANIZATION_QUICKSTART.md) - Organizations quick start
- [MASTER_TOKEN_FEATURE.md](MASTER_TOKEN_FEATURE.md) - Master tokens
- [PR_CONTEXT_FEATURE.md](PR_CONTEXT_FEATURE.md) - Pull Request context
- [HTML_ESCAPE_FIX.md](HTML_ESCAPE_FIX.md) - HTML escaping fix
## Architecture and development
- [ARCHITECTURE.md](ARCHITECTURE.md) - Project architecture
- [PROJECT_STATUS.md](PROJECT_STATUS.md) - Project status
- [CONTRIBUTING.md](CONTRIBUTING.md) - How to contribute
## Changelog
- [CHANGELOG.md](CHANGELOG.md) - Change history
- [CHANGELOG_ORGANIZATIONS.md](CHANGELOG_ORGANIZATIONS.md) - Organizations change history
## Commands and debugging
- [COMMANDS.md](COMMANDS.md) - Useful commands
- [DEBUG_GUIDE.md](DEBUG_GUIDE.md) - Debugging guide
## Settings and recommendations
- [MODEL_RECOMMENDATION.md](MODEL_RECOMMENDATION.md) - Model selection recommendations
- [PRODUCTION_URLS.md](PRODUCTION_URLS.md) - Production URL configuration
- [SUMMARY.md](SUMMARY.md) - Brief project summary
## Extras
- [TEST_STREAMING.md](TEST_STREAMING.md) - Testing WebSocket streaming


@ -1,92 +0,0 @@
#!/bin/bash

###############################################################################
# Quick installation fix script
###############################################################################

set -e

echo "=========================================="
echo "Fixing AI Review Installation"
echo "=========================================="
echo ""

# Determine the install directory (where this script lives)
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
INSTALL_DIR="$SCRIPT_DIR"

echo "Working directory: $INSTALL_DIR"
echo ""

cd "$INSTALL_DIR"

# 1. Create the Python virtual environment
echo "[1/5] Creating Python virtual environment..."
cd backend
python3 -m venv venv
source venv/bin/activate
echo "✓ venv created"
echo ""

# 2. Install Python dependencies
echo "[2/5] Installing Python dependencies..."
pip install --upgrade pip > /dev/null
pip install -r requirements.txt > /dev/null
echo "✓ Python dependencies installed"
echo ""

# 3. Create the database
echo "[3/5] Creating the database..."
python migrate.py
echo "✓ Database created"
echo ""

# 4. Install Node.js dependencies and build the frontend
echo "[4/5] Building frontend..."
cd ../frontend

# Check that Node.js is available
if ! command -v node &> /dev/null; then
    echo "ERROR: Node.js is not installed!"
    echo "Install it: curl -fsSL https://deb.nodesource.com/setup_18.x | sudo -E bash - && sudo apt-get install -y nodejs"
    exit 1
fi

# Create .env.production
cat > .env.production << 'EOF'
VITE_API_URL=/api
VITE_WS_URL=
EOF

npm install > /dev/null 2>&1
npm run build
echo "✓ Frontend built"
echo ""

# 5. Check the result
echo "[5/5] Checking..."
cd ..
echo "Backend venv: $([ -d backend/venv ] && echo '✓ OK' || echo '✗ MISSING')"
echo "Backend DB: $([ -f backend/review.db ] && echo '✓ OK' || echo '✗ MISSING')"
echo "Frontend build: $([ -d backend/public ] && echo '✓ OK' || echo '✗ MISSING')"
echo ""

if [ -d backend/venv ] && [ -f backend/review.db ] && [ -d backend/public ]; then
    echo "=========================================="
    echo "✓ Installation fixed successfully!"
    echo "=========================================="
    echo ""
    echo "Now run:"
    echo "  sudo systemctl restart ai-review"
    echo "  sudo systemctl status ai-review"
    echo ""
    echo "Or start manually:"
    echo "  cd $INSTALL_DIR/backend"
    echo "  source venv/bin/activate"
    echo "  uvicorn app.main:app --host 0.0.0.0 --port 8000"
else
    echo "ERROR: Something is still missing!"
    exit 1
fi


@ -1,91 +0,0 @@
#!/bin/bash

echo "=========================================="
echo "Creating simple systemd service"
echo "=========================================="
echo ""

INSTALL_DIR="$HOME/code-review-agent"

echo "Install directory: $INSTALL_DIR"
echo "User: $USER"
echo ""

# Create a simple systemd service WITHOUT strict sandboxing
sudo tee /etc/systemd/system/ai-review.service > /dev/null << EOF
[Unit]
Description=AI Code Review Platform
After=network.target

[Service]
Type=simple
User=$USER
WorkingDirectory=$INSTALL_DIR/backend
Environment="PATH=$INSTALL_DIR/backend/venv/bin:/usr/local/bin:/usr/bin:/bin"
ExecStart=$INSTALL_DIR/backend/venv/bin/python3 -m uvicorn app.main:app --host 0.0.0.0 --port 8000
Restart=always
RestartSec=10
StandardOutput=append:/var/log/ai-review/access.log
StandardError=append:/var/log/ai-review/error.log

[Install]
WantedBy=multi-user.target
EOF

echo "✓ Service file created"
echo ""

# Create the log directory
sudo mkdir -p /var/log/ai-review
sudo chown $USER:$USER /var/log/ai-review
echo "✓ Log directory created"
echo ""

# Reload systemd
echo "Reloading systemd..."
sudo systemctl daemon-reload
sudo systemctl enable ai-review
echo "✓ Systemd reloaded"
echo ""

# Start the service
echo "Starting service..."
sudo systemctl restart ai-review
sleep 3

# Check the status
echo ""
echo "=========================================="
if sudo systemctl is-active --quiet ai-review; then
    echo "✅ Service started successfully!"
    echo "=========================================="
    echo ""
    sudo systemctl status ai-review --no-pager | head -20
    echo ""
    echo "Application is available at: http://localhost:8000"
    echo ""
    echo "Useful commands:"
    echo "  sudo systemctl status ai-review"
    echo "  sudo journalctl -u ai-review -f"
    echo "  tail -f /var/log/ai-review/error.log"
else
    echo "❌ Service failed to start"
    echo "=========================================="
    echo ""
    echo "Status:"
    sudo systemctl status ai-review --no-pager
    echo ""
    echo "Last 30 log lines:"
    sudo journalctl -u ai-review -n 30 --no-pager
    echo ""
    echo "Check:"
    echo "  1. tail -50 /var/log/ai-review/error.log"
    echo "  2. Try starting manually:"
    echo "     cd $INSTALL_DIR/backend"
    echo "     source venv/bin/activate"
    echo "     python -m uvicorn app.main:app"
    exit 1
fi

echo ""


@ -7,6 +7,7 @@ import ReviewDetail from './pages/ReviewDetail';
 import Organizations from './pages/Organizations';
 import Tasks from './pages/Tasks';
 import WebSocketStatus from './components/WebSocketStatus';
+import Footer from './components/Footer';

 const queryClient = new QueryClient({
   defaultOptions: {
@ -65,7 +66,7 @@ function Navigation() {
 function AppContent() {
   return (
-    <div className="min-h-screen bg-dark-bg">
+    <div className="min-h-screen bg-dark-bg pb-12">
       <Navigation />
       <main className="max-w-7xl mx-auto px-4 sm:px-6 lg:px-8 py-8">
@ -78,6 +79,8 @@ function AppContent() {
           <Route path="/reviews/:id" element={<ReviewDetail />} />
         </Routes>
       </main>
+
+      <Footer />
     </div>
   );
 }


@ -68,5 +68,25 @@ export const getReviewStats = async () => {
   return response.data;
 };

+export interface ReviewEvent {
+  id: number;
+  review_id: number;
+  event_type: string;
+  step?: string;
+  message?: string;
+  data?: any;
+  created_at: string;
+}
+
+export const getReviewEvents = async (reviewId: number) => {
+  const response = await api.get<ReviewEvent[]>(`/reviews/${reviewId}/events`);
+  return response.data;
+};
+
+export const getBackendVersion = async () => {
+  const response = await api.get<{ version: string }>('/version');
+  return response.data;
+};
+
 export default api;


@ -0,0 +1,39 @@
/**
 * Footer component with version info
 */
import { useQuery } from '@tanstack/react-query';
import { getBackendVersion } from '../api/client';

export default function Footer() {
  const { data: versionData } = useQuery({
    queryKey: ['backendVersion'],
    queryFn: getBackendVersion,
    staleTime: 60000, // Cache for 1 minute
    refetchInterval: 300000, // Refetch every 5 minutes
  });

  return (
    <footer className="fixed bottom-0 left-0 right-0 bg-dark-card border-t border-dark-border py-2 px-4 z-10">
      <div className="container mx-auto flex items-center justify-between text-xs text-dark-text-muted">
        <div>
          AI Code Review Agent
        </div>
        <div className="flex items-center gap-4">
          <span>
            Backend v{versionData?.version || '...'}
          </span>
          <a
            href="https://github.com/yourusername/ai-review-agent"
            target="_blank"
            rel="noopener noreferrer"
            className="hover:text-dark-text-secondary transition-colors"
          >
            GitHub
          </a>
        </div>
      </div>
    </footer>
  );
}


@ -1,5 +1,6 @@
 import React, { useEffect, useState } from 'react';
 import { WS_URL } from '../api/websocket';
+import { getReviewEvents, ReviewEvent } from '../api/client';

 interface StreamEvent {
   type: string;
@ -36,6 +37,41 @@ export const ReviewStream: React.FC<ReviewStreamProps> = ({ reviewId }) => {
     console.log('🔌 Connecting to WebSocket:', WS_URL);
     console.log('👀 Watching for review ID:', reviewId);

+    // Load historical events from database
+    const loadHistoricalEvents = async () => {
+      try {
+        console.log('📥 Loading historical events from DB...');
+        const historicalEvents = await getReviewEvents(reviewId);
+        console.log(`✅ Loaded ${historicalEvents.length} historical events`);
+
+        // Convert DB events to stream events format
+        const streamEvents: StreamEvent[] = historicalEvents.map((dbEvent: ReviewEvent) => ({
+          type: dbEvent.event_type,
+          review_id: dbEvent.review_id,
+          pr_number: 0, // Not stored in DB
+          timestamp: dbEvent.created_at,
+          data: {
+            type: dbEvent.event_type,
+            step: dbEvent.step,
+            message: dbEvent.message,
+            data: dbEvent.data
+          }
+        }));
+
+        setEvents(streamEvents);
+
+        // Set current step from the last agent_step event
+        // (reverse a copy so the array passed to setEvents keeps chronological order)
+        const lastAgentStep = [...streamEvents].reverse().find(e => e.type === 'agent_step');
+        if (lastAgentStep && lastAgentStep.data.step) {
+          setCurrentStep(lastAgentStep.data.step);
+        }
+      } catch (error) {
+        console.error('❌ Error loading historical events:', error);
+      }
+    };
+
+    loadHistoricalEvents();
+
     const ws = new WebSocket(WS_URL);
     let pingInterval: number;

test_llm_streaming.py Normal file

@ -0,0 +1,137 @@
"""
Тест стриминга LLM messages от LangGraph
"""
import asyncio
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_ollama import OllamaLLM
class TestState(TypedDict):
messages: Annotated[list, operator.add]
result: str
async def llm_node(state: TestState) -> TestState:
"""Нода с LLM вызовом"""
print(" [LLM NODE] Вызов LLM...")
llm = OllamaLLM(
model="qwen2.5-coder:3b",
base_url="http://localhost:11434",
temperature=0.7
)
# Простой промпт для быстрого ответа
prompt = "Напиши короткую проверку кода на Python (не более 100 символов)"
response = await llm.ainvoke(prompt)
print(f" [LLM NODE] Ответ получен: {response[:50]}...")
return {
"messages": [{"role": "ai", "content": response}],
"result": response
}
def create_test_graph():
"""Создает тестовый граф с LLM"""
workflow = StateGraph(TestState)
workflow.add_node("llm_call", llm_node)
workflow.set_entry_point("llm_call")
workflow.add_edge("llm_call", END)
return workflow.compile()
async def test_with_llm():
"""Тест стриминга с LLM"""
print("\n" + "="*80)
print("ТЕСТ СТРИМИНГА LLM MESSAGES")
print("="*80)
graph = create_test_graph()
initial_state: TestState = {
"messages": [],
"result": ""
}
# Тест: updates + messages
print(f"\n🔍 Тест: stream_mode=['updates', 'messages']")
print("-" * 80)
event_count = 0
messages_count = 0
async for event in graph.astream(initial_state, stream_mode=["updates", "messages"]):
event_count += 1
if isinstance(event, tuple) and len(event) >= 2:
event_type, event_data = event[0], event[1]
print(f"\n📨 Event #{event_count}")
print(f" Type: {event_type}")
print(f" Data type: {type(event_data)}")
if event_type == 'updates':
print(f" ✅ Node update")
if isinstance(event_data, dict):
for node_name in event_data.keys():
print(f" Node: {node_name}")
elif event_type == 'messages':
messages_count += 1
print(f" 💬 LLM Messages (#{messages_count})")
if isinstance(event_data, (list, tuple)):
for i, msg in enumerate(event_data):
print(f" Message {i+1}:")
# Извлекаем контент
if hasattr(msg, 'content'):
content = msg.content
print(f" Content: {content[:100]}...")
elif isinstance(msg, dict):
print(f" Dict: {msg}")
else:
print(f" Type: {type(msg)}")
print(f" Str: {str(msg)[:100]}...")
print(f"\n" + "="*80)
print(f"Всего событий: {event_count}")
print(f"✅ Messages событий: {messages_count}")
print("="*80)
async def main():
print("\n" + "="*80)
print("ТЕСТИРОВАНИЕ LLM STREAMING В LANGGRAPH")
print("="*80)
print("\nПроверка Ollama...")
try:
# Проверяем что Ollama доступен
from langchain_ollama import OllamaLLM
test_llm = OllamaLLM(model="qwen2.5-coder:3b", base_url="http://localhost:11434")
result = await test_llm.ainvoke("test")
print("✅ Ollama работает!")
except Exception as e:
print(f"❌ Ошибка подключения к Ollama: {e}")
print("\n⚠️ Убедитесь что Ollama запущен: ollama serve")
print("⚠️ И модель загружена: ollama pull qwen2.5-coder:3b\n")
return
await test_with_llm()
print("\n✅ Тестирование завершено\n")
if __name__ == "__main__":
asyncio.run(main())

tests/README.md Normal file

@ -0,0 +1,72 @@
# Tests
This folder contains test scripts that exercise individual components of the system.
## Streaming tests
### test_simple_graph.py
A simple LangGraph streaming test that needs no real data and no database.
**Run:**
```bash
cd backend
$env:PYTHONIOENCODING="utf-8"; ./venv/Scripts/python ../tests/test_simple_graph.py # Windows PowerShell
# or
python ../tests/test_simple_graph.py # Linux/Mac
```
**What it tests:**
- The different streaming modes (`updates`, `messages`, `values`, `debug`)
- Event handling through a callback
- The format of the events emitted by LangGraph (a minimal sketch of such a test is shown below)
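`test_simple_graph.py` itself is not included in this commit, so purely as an illustration of what such a streaming test looks like, here is a minimal self-contained sketch; the node name, state fields and callback are made up for the example:
```python
# Minimal sketch of a LangGraph streaming test without an LLM or DB.
# Node and field names are illustrative, not the project's real test.
import asyncio
from typing import TypedDict
from langgraph.graph import StateGraph, END


class DemoState(TypedDict):
    note: str


async def demo_node(state: DemoState) -> DemoState:
    return {"note": "hello from demo_node"}


async def main():
    workflow = StateGraph(DemoState)
    workflow.add_node("demo", demo_node)
    workflow.set_entry_point("demo")
    workflow.add_edge("demo", END)
    graph = workflow.compile()

    async def on_event(event):  # callback, mirrors how the agent forwards events
        print("event:", event)

    # Stream node updates and state snapshots; each event is a (mode, payload) tuple
    async for event in graph.astream({"note": ""}, stream_mode=["updates", "values"]):
        await on_event(event)


if __name__ == "__main__":
    asyncio.run(main())
```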
### test_langgraph_events.py
Full test using the real ReviewerAgent and the database.
**Requirements:**
- A working database with data
- An existing Review ID, PR Number and Repository ID
- A configured `.env` file
**Run:**
1. Edit the parameters in the file:
```python
TEST_REVIEW_ID = 1
TEST_PR_NUMBER = 5
TEST_REPOSITORY_ID = 1
```
2. Run it:
```bash
cd backend
python ../tests/test_langgraph_events.py
```
### test_llm_streaming.py
Test of LLM message streaming against a real Ollama instance.
**Requirements:**
- Ollama is running (`ollama serve`)
- The model is pulled (`ollama pull qwen2.5-coder:3b`)
**Run:**
```bash
cd backend
$env:PYTHONIOENCODING="utf-8"; ./venv/Scripts/python ../tests/test_llm_streaming.py # Windows
python ../tests/test_llm_streaming.py # Linux/Mac
```
## Adding new tests
Add new tests to this folder, named with the `test_` prefix.
## Useful links
- [TEST_STREAMING.md](../docs/TEST_STREAMING.md) - Detailed documentation on testing streaming

tests/test_llm_streaming.py Normal file

@ -0,0 +1,137 @@
"""
Тест стриминга LLM messages от LangGraph
"""
import asyncio
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
from langchain_ollama import OllamaLLM
class TestState(TypedDict):
messages: Annotated[list, operator.add]
result: str
async def llm_node(state: TestState) -> TestState:
"""Нода с LLM вызовом"""
print(" [LLM NODE] Вызов LLM...")
llm = OllamaLLM(
model="qwen2.5-coder:3b",
base_url="http://localhost:11434",
temperature=0.7
)
# Простой промпт для быстрого ответа
prompt = "Напиши короткую проверку кода на Python (не более 100 символов)"
response = await llm.ainvoke(prompt)
print(f" [LLM NODE] Ответ получен: {response[:50]}...")
return {
"messages": [{"role": "ai", "content": response}],
"result": response
}
def create_test_graph():
"""Создает тестовый граф с LLM"""
workflow = StateGraph(TestState)
workflow.add_node("llm_call", llm_node)
workflow.set_entry_point("llm_call")
workflow.add_edge("llm_call", END)
return workflow.compile()
async def test_with_llm():
"""Тест стриминга с LLM"""
print("\n" + "="*80)
print("ТЕСТ СТРИМИНГА LLM MESSAGES")
print("="*80)
graph = create_test_graph()
initial_state: TestState = {
"messages": [],
"result": ""
}
# Тест: updates + messages
print(f"\n🔍 Тест: stream_mode=['updates', 'messages']")
print("-" * 80)
event_count = 0
messages_count = 0
async for event in graph.astream(initial_state, stream_mode=["updates", "messages"]):
event_count += 1
if isinstance(event, tuple) and len(event) >= 2:
event_type, event_data = event[0], event[1]
print(f"\n📨 Event #{event_count}")
print(f" Type: {event_type}")
print(f" Data type: {type(event_data)}")
if event_type == 'updates':
print(f" ✅ Node update")
if isinstance(event_data, dict):
for node_name in event_data.keys():
print(f" Node: {node_name}")
elif event_type == 'messages':
messages_count += 1
print(f" 💬 LLM Messages (#{messages_count})")
if isinstance(event_data, (list, tuple)):
for i, msg in enumerate(event_data):
print(f" Message {i+1}:")
# Извлекаем контент
if hasattr(msg, 'content'):
content = msg.content
print(f" Content: {content[:100]}...")
elif isinstance(msg, dict):
print(f" Dict: {msg}")
else:
print(f" Type: {type(msg)}")
print(f" Str: {str(msg)[:100]}...")
print(f"\n" + "="*80)
print(f"Всего событий: {event_count}")
print(f"✅ Messages событий: {messages_count}")
print("="*80)
async def main():
print("\n" + "="*80)
print("ТЕСТИРОВАНИЕ LLM STREAMING В LANGGRAPH")
print("="*80)
print("\nПроверка Ollama...")
try:
# Проверяем что Ollama доступен
from langchain_ollama import OllamaLLM
test_llm = OllamaLLM(model="qwen2.5-coder:3b", base_url="http://localhost:11434")
result = await test_llm.ainvoke("test")
print("✅ Ollama работает!")
except Exception as e:
print(f"❌ Ошибка подключения к Ollama: {e}")
print("\n⚠️ Убедитесь что Ollama запущен: ollama serve")
print("⚠️ И модель загружена: ollama pull qwen2.5-coder:3b\n")
return
await test_with_llm()
print("\n✅ Тестирование завершено\n")
if __name__ == "__main__":
asyncio.run(main())