# LLM Backend: "ollama" or "openai"
LLM_BACKEND=ollama

# Ollama settings
OLLAMA_URL=http://192.168.86.172:11434
OLLAMA_MODEL=qwen2.5:7b

# OpenAI-compatible settings (Kiro gateway, OpenRouter, etc.)
# OPENAI_URL=http://192.168.86.11:8000
# OPENAI_MODEL=claude-haiku-4
# OPENAI_API_KEY=not-needed

# Repo to ingest
TARGET_REPO=https://github.com/labstack/echo.git

# Parallelism
MAX_CONCURRENT=4