Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
138 changes: 138 additions & 0 deletions .github/workflows/docker-build-push.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,138 @@
# name: Build and Push Docker Image

# on:
# push:
# branches:
# - main
# paths:
# - 'backend/**'
# - 'frontend/**'
# - 'docker-compose.yml'
# - '.github/workflows/docker-build-push.yml'
# pull_request:
# branches:
# - main
# paths:
# - 'backend/**'
# - 'frontend/**'
# - 'docker-compose.yml'
# - '.github/workflows/docker-build-push.yml'

# env:
# REGISTRY: docker.io
# BACKEND_IMAGE_NAME: mathmodelagent-backend
# FRONTEND_IMAGE_NAME: mathmodelagent-frontend

# jobs:
# build-backend:
# runs-on: ubuntu-latest
# outputs:
# image-tag: ${{ steps.meta.outputs.tags }}
# image-digest: ${{ steps.build.outputs.digest }}

# steps:
# - name: Checkout code
# uses: actions/checkout@v4

# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v3

# - name: Log in to Docker Hub
# if: github.event_name != 'pull_request'
# uses: docker/login-action@v3
# with:
# registry: ${{ env.REGISTRY }}
# username: ${{ secrets.DOCKER_USERNAME }}
# password: ${{ secrets.DOCKER_PASSWORD }}

# - name: Extract metadata for backend
# id: meta
# uses: docker/metadata-action@v5
# with:
# images: ${{ env.REGISTRY }}/${{ secrets.DOCKER_USERNAME }}/${{ env.BACKEND_IMAGE_NAME }}
# tags: |
# type=ref,event=branch
# type=ref,event=pr
# type=sha,prefix={{branch}}-
# type=raw,value=latest,enable={{is_default_branch}}

# - name: Build and push backend Docker image
# id: build
# uses: docker/build-push-action@v5
# with:
# context: ./backend
# file: ./backend/Dockerfile
# push: ${{ github.event_name != 'pull_request' }}
# tags: ${{ steps.meta.outputs.tags }}
# labels: ${{ steps.meta.outputs.labels }}
# cache-from: type=gha
# cache-to: type=gha,mode=max
# platforms: linux/amd64,linux/arm64

# build-frontend:
# runs-on: ubuntu-latest
# outputs:
# image-tag: ${{ steps.meta.outputs.tags }}
# image-digest: ${{ steps.build.outputs.digest }}

# steps:
# - name: Checkout code
# uses: actions/checkout@v4

# - name: Set up Docker Buildx
# uses: docker/setup-buildx-action@v3

# - name: Log in to Docker Hub
# if: github.event_name != 'pull_request'
# uses: docker/login-action@v3
# with:
# registry: ${{ env.REGISTRY }}
# username: ${{ secrets.DOCKER_USERNAME }}
# password: ${{ secrets.DOCKER_PASSWORD }}

# - name: Extract metadata for frontend
# id: meta
# uses: docker/metadata-action@v5
# with:
# images: ${{ env.REGISTRY }}/${{ secrets.DOCKER_USERNAME }}/${{ env.FRONTEND_IMAGE_NAME }}
# tags: |
# type=ref,event=branch
# type=ref,event=pr
# type=sha,prefix={{branch}}-
# type=raw,value=latest,enable={{is_default_branch}}

# - name: Build and push frontend Docker image
# id: build
# uses: docker/build-push-action@v5
# with:
# context: ./frontend
# file: ./frontend/Dockerfile
# push: ${{ github.event_name != 'pull_request' }}
# tags: ${{ steps.meta.outputs.tags }}
# labels: ${{ steps.meta.outputs.labels }}
# cache-from: type=gha
# cache-to: type=gha,mode=max
# platforms: linux/amd64,linux/arm64

# security-scan:
# runs-on: ubuntu-latest
# needs: [build-backend, build-frontend]
# if: github.event_name != 'pull_request'

# strategy:
# matrix:
# component: [backend, frontend]

# steps:
# - name: Run Trivy vulnerability scanner
# uses: aquasecurity/trivy-action@master
# with:
# image-ref: ${{ needs[format('build-{0}', matrix.component)].outputs.image-tag }}
# format: 'sarif'
# output: 'trivy-results-${{ matrix.component }}.sarif'

# - name: Upload Trivy scan results to GitHub Security tab
# uses: github/codeql-action/upload-sarif@v3
# if: always()
# with:
# sarif_file: 'trivy-results-${{ matrix.component }}.sarif'
3 changes: 2 additions & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,8 @@ docker-compose up -d

启动后端

*启动 redis*
> [!CAUTION]
> 启动 Redis;下载与运行方法可询问 AI

```bash
cd backend # 切换到 backend 目录下
Expand Down
5 changes: 1 addition & 4 deletions backend/.env.dev.example
Original file line number Diff line number Diff line change
Expand Up @@ -20,10 +20,6 @@ WRITER_API_KEY=
WRITER_MODEL=
# WRITER_BASE_URL=

DEFAULT_API_KEY=
DEFAULT_MODEL=
# DEFAULT_BASE_URL=

# 模型最大问答次数
MAX_CHAT_TURNS=60
# 思考反思次数
Expand All @@ -39,6 +35,7 @@ LOG_LEVEL=DEBUG
DEBUG=true
# 确保安装 Redis
# 如果是docker: REDIS_URL=redis://redis:6379/0
# 本地部署 : redis://localhost:6379/0
REDIS_URL=redis://localhost:6379/0
REDIS_MAX_CONNECTIONS=20
CORS_ALLOW_ORIGINS=http://localhost:5173,http://localhost:3000
1 change: 1 addition & 0 deletions backend/Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -22,4 +22,5 @@ RUN --mount=type=cache,target=/root/.cache/uv \

EXPOSE 8000

# Launch uvicorn via `uv run` so the uv-managed environment is used
CMD ["uv", "run", "uvicorn", "app.main:app", "--host", "0.0.0.0", "--port", "8000", "--ws-ping-interval", "60", "--ws-ping-timeout", "120"]
43 changes: 16 additions & 27 deletions backend/app/config/setting.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,35 +18,31 @@ def parse_cors(value: str) -> list[str]:
class Settings(BaseSettings):
ENV: str

COORDINATOR_API_KEY: str
COORDINATOR_MODEL: str
COORDINATOR_API_KEY: Optional[str] = None
COORDINATOR_MODEL: Optional[str] = None
COORDINATOR_BASE_URL: Optional[str] = None

MODELER_API_KEY: str
MODELER_MODEL: str
MODELER_API_KEY: Optional[str] = None
MODELER_MODEL: Optional[str] = None
MODELER_BASE_URL: Optional[str] = None

CODER_API_KEY: str
CODER_MODEL: str
CODER_API_KEY: Optional[str] = None
CODER_MODEL: Optional[str] = None
CODER_BASE_URL: Optional[str] = None

WRITER_API_KEY: str
WRITER_MODEL: str
WRITER_API_KEY: Optional[str] = None
WRITER_MODEL: Optional[str] = None
WRITER_BASE_URL: Optional[str] = None

DEFAULT_API_KEY: str
DEFAULT_MODEL: str
DEFAULT_BASE_URL: Optional[str] = None

MAX_CHAT_TURNS: int
MAX_RETRIES: int
MAX_CHAT_TURNS: int = 60
MAX_RETRIES: int = 5
E2B_API_KEY: Optional[str] = None
LOG_LEVEL: str
DEBUG: bool
REDIS_URL: str
REDIS_MAX_CONNECTIONS: int
CORS_ALLOW_ORIGINS: Annotated[list[str] | str, BeforeValidator(parse_cors)]
SERVER_HOST: str = "http://localhost:8000" # 默认值
LOG_LEVEL: str = "DEBUG"
DEBUG: bool = True
REDIS_URL: str = "redis://redis:6379/0"
REDIS_MAX_CONNECTIONS: int = 10
CORS_ALLOW_ORIGINS: Annotated[list[str] | str, BeforeValidator(parse_cors)] = "*"
SERVER_HOST: str = "http://localhost:8000"
OPENALEX_EMAIL: Optional[str] = None

model_config = SettingsConfigDict(
Expand All @@ -55,13 +51,6 @@ class Settings(BaseSettings):
extra="allow",
)

def get_deepseek_config(self) -> dict:
return {
"api_key": self.DEEPSEEK_API_KEY,
"model": self.DEEPSEEK_MODEL,
"base_url": self.DEEPSEEK_BASE_URL,
}

@classmethod
def from_env(cls, env: str = None):
env = env or os.getenv("ENV", "dev")
Expand Down
6 changes: 3 additions & 3 deletions backend/app/core/agents/coordinator_agent.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,9 +30,9 @@ async def run(self, ques_all: str) -> CoordinatorToModeler:
)
json_str = response.choices[0].message.content

if not json_str.startswith("```json"):
logger.info(f"拒绝回答用户非数学建模请求:{json_str}")
raise ValueError(f"拒绝回答用户非数学建模请求:{json_str}")
# if not json_str.startswith("```json"):
# logger.info(f"拒绝回答用户非数学建模请求:{json_str}")
# raise ValueError(f"拒绝回答用户非数学建模请求:{json_str}")

# 清理 JSON 字符串
json_str = json_str.replace("```json", "").replace("```", "").strip()
Expand Down
4 changes: 4 additions & 0 deletions backend/app/core/prompts.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@
CODER_PROMPT = f"""
You are an AI code interpreter specializing in data analysis with Python. Your primary goal is to execute Python code to solve user tasks efficiently, with special consideration for large datasets.

中文回复

**Environment**: {platform.system()}
**Key Skills**: pandas, numpy, seaborn, matplotlib, scikit-learn, xgboost, scipy
**Data Visualization Style**: Nature/Science publication quality
Expand Down Expand Up @@ -139,6 +141,8 @@ def get_writer_prompt(
# Role Definition
Professional writer for mathematical modeling competitions with expertise in technical documentation and literature synthesis

中文回复

# Core Tasks
1. Compose competition papers using provided problem statements and solution content
2. Strictly adhere to {format_output} formatting templates
Expand Down
104 changes: 104 additions & 0 deletions backend/app/routers/modeling_router.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,10 +16,114 @@
from fastapi import HTTPException
from icecream import ic
from app.schemas.request import ExampleRequest
from pydantic import BaseModel
import litellm
from app.config.setting import settings

router = APIRouter()


class ValidateApiKeyRequest(BaseModel):
    """Request payload for validating an LLM provider API key."""
    # The API key to test against the provider.
    api_key: str
    # Provider endpoint; defaults to the official OpenAI endpoint.
    base_url: str = "https://api.openai.com/v1"
    # LiteLLM model identifier used for the one-token test completion.
    model_id: str


class ValidateApiKeyResponse(BaseModel):
    """Result of an API-key validation attempt."""
    # True when the test completion succeeded.
    valid: bool
    # Human-readable outcome message (success mark or classified error).
    message: str


class SaveApiConfigRequest(BaseModel):
    """Per-agent API configuration to persist onto the runtime settings.

    Each dict is expected to carry 'apiKey', 'modelId' and 'baseUrl' keys
    (presumably produced by the frontend settings form — TODO confirm).
    """
    coordinator: dict
    modeler: dict
    coder: dict
    writer: dict


@router.post("/save-api-config")
async def save_api_config(request: SaveApiConfigRequest):
    """
    保存验证成功的 API 配置到 settings

    Copies the per-agent API key, model id and base URL from the request
    onto the in-process ``settings`` object. A role whose dict is empty
    (falsy) is left untouched.

    Returns a ``{"success": True, "message": ...}`` payload on success;
    raises HTTP 500 when assignment fails.
    """

    def _apply(prefix: str, config: dict) -> None:
        # One helper replaces the four copy-pasted assignment stanzas.
        # NOTE(review): a present-but-partial dict overwrites existing
        # settings with "" for missing keys — confirm this is intended.
        if config:
            setattr(settings, f"{prefix}_API_KEY", config.get('apiKey', ''))
            setattr(settings, f"{prefix}_MODEL", config.get('modelId', ''))
            setattr(settings, f"{prefix}_BASE_URL", config.get('baseUrl', ''))

    try:
        _apply("COORDINATOR", request.coordinator)
        _apply("MODELER", request.modeler)
        _apply("CODER", request.coder)
        _apply("WRITER", request.writer)

        return {"success": True, "message": "配置保存成功"}
    except Exception as e:
        logger.error(f"保存配置失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"保存配置失败: {str(e)}")


@router.post("/validate-api-key", response_model=ValidateApiKeyResponse)
async def validate_api_key(request: ValidateApiKeyRequest):
    """
    验证 API Key 的有效性

    Issues a minimal one-token completion through litellm with the supplied
    credentials; classifies any failure into a user-facing message.
    """
    # Passing None lets litellm fall back to its own default endpoint
    # when the caller kept the stock OpenAI base URL.
    custom_base = (
        None
        if request.base_url == "https://api.openai.com/v1"
        else request.base_url
    )
    try:
        await litellm.acompletion(
            model=request.model_id,
            messages=[{"role": "user", "content": "Hi"}],
            max_tokens=1,
            api_key=request.api_key,
            base_url=custom_base,
        )
    except Exception as exc:
        detail = str(exc)
        # Ordered (matched, message) table; first hit wins, mirroring
        # the branch order of the original if/elif chain.
        classified = [
            ("401" in detail or "Unauthorized" in detail,
             "✗ API Key 无效或已过期"),
            ("404" in detail or "Not Found" in detail,
             "✗ 模型 ID 不存在或 Base URL 错误"),
            ("429" in detail or "rate limit" in detail.lower(),
             "✗ 请求过于频繁,请稍后再试"),
            ("403" in detail or "Forbidden" in detail,
             "✗ API 权限不足或账户余额不足"),
        ]
        for matched, message in classified:
            if matched:
                return ValidateApiKeyResponse(valid=False, message=message)
        return ValidateApiKeyResponse(
            valid=False,
            message=f"✗ 验证失败: {detail[:50]}..."
        )

    return ValidateApiKeyResponse(
        valid=True,
        message="✓ 模型 API 验证成功"
    )


@router.post("/example")
async def exampleModeling(
example_request: ExampleRequest,
Expand Down
Loading