# dev-intel-poc/docgen.py  (191 lines, 6.1 KiB, Python)
"""LLM client for generating documentation. Supports Ollama and OpenAI-compatible APIs."""
import requests
import os
import concurrent.futures
import time
from pathlib import Path
def _load_dotenv(path: Path) -> None:
    """Apply KEY=VALUE entries from a minimal .env file to os.environ.

    Blank lines, comment lines starting with '#', and lines without '='
    are skipped.  Values wrapped in matching single or double quotes are
    unquoted (common .env convention).  Existing environment variables
    are never overwritten (setdefault), so the real environment always
    wins over the .env file.
    """
    for raw in path.read_text().splitlines():
        line = raw.strip()
        if not line or line.startswith("#") or "=" not in line:
            continue
        key, _, val = line.partition("=")
        val = val.strip()
        # Strip matching surrounding quotes so KEY="value" yields value.
        if len(val) >= 2 and val[0] == val[-1] and val[0] in ("'", '"'):
            val = val[1:-1]
        os.environ.setdefault(key.strip(), val)


# Load .env if present (sits next to this script)
_env_file = Path(__file__).parent / ".env"
if _env_file.exists():
    _load_dotenv(_env_file)

# Backend: "ollama" or "openai"
LLM_BACKEND = os.environ.get("LLM_BACKEND", "ollama")
# Ollama settings
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://192.168.86.172:11434")
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "qwen2.5:7b")
# OpenAI-compatible settings (works with Kiro gateway, OpenRouter, etc.)
OPENAI_URL = os.environ.get("OPENAI_URL", "http://192.168.86.11:8000")
OPENAI_MODEL = os.environ.get("OPENAI_MODEL", "claude-haiku-4")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY", "not-needed")
# Number of concurrent LLM requests used by generate_docs_batch.
MAX_CONCURRENT = int(os.environ.get("MAX_CONCURRENT", "4"))
def generate_file_doc(filepath: str, content: str) -> str:
    """Generate documentation for a single file via the configured LLM."""
    # Cap the source we ship to the model; mark the cut explicitly so the
    # model knows the file continues.
    if len(content) > 8000:
        content = content[:8000] + "\n\n... [truncated]"
    prompt = f"""You are a senior software engineer documenting a Go codebase.
Describe what this file does in 2-4 sentences. Be specific about:
- The domain logic and purpose (not just "this file contains functions")
- Key types, interfaces, or structs defined
- How it fits into the larger system (if apparent from imports/naming)
Do NOT describe Go syntax or language mechanics. Describe WHAT the code does and WHY.
File: {filepath}
```go
{content}
```
Documentation:"""
    return _call_llm(prompt)
def generate_relationship_doc(file_a: str, content_a: str, file_b: str, content_b: str) -> str:
    """Generate documentation for a relationship between two files."""

    def _clip(text: str) -> str:
        # Keep each file's snippet bounded so the combined prompt stays small.
        if len(text) > 4000:
            return text[:4000] + "\n... [truncated]"
        return text

    content_a = _clip(content_a)
    content_b = _clip(content_b)
    prompt = f"""You are a senior software engineer documenting how two files in a Go codebase interact.
Describe in 1-2 sentences how File A uses or depends on File B. Be specific about which types, functions, or interfaces are shared.
File A: {file_a}
```go
{content_a}
```
File B: {file_b}
```go
{content_b}
```
Relationship:"""
    return _call_llm(prompt)
def generate_repo_doc(readme: str, entry_files: list[tuple[str, str]]) -> str:
    """Generate repo-level documentation from README and key entry points."""
    # At most five entry files, each clipped to its first 2000 characters.
    files_section = "".join(
        f"\n--- {path} ---\n{content[:2000]}\n" for path, content in entry_files[:5]
    )
    # Clip the README the same way (slicing is a no-op for short text).
    readme_section = readme[:3000]
    prompt = f"""You are a senior software engineer writing a project overview.
Based on the README and key source files below, write a 4-6 sentence summary of this project. Cover:
- What the project does (its purpose)
- Key architectural patterns (routing, middleware, etc.)
- The main abstractions and how they fit together
README:
{readme_section}
Key source files:
{files_section}
Project Overview:"""
    return _call_llm(prompt)
def generate_docs_batch(items: list[tuple[str, str]], doc_fn, max_workers=None) -> list[str]:
    """Generate docs for multiple items concurrently.

    Args:
        items: (filepath, content) pairs; each pair is passed to doc_fn.
        doc_fn: callable taking (filepath, content) and returning a doc string.
        max_workers: thread-pool size; defaults to the MAX_CONCURRENT setting.

    Returns:
        Doc strings in the same order as *items*.  A failure on one item
        does not abort the batch; its slot holds an error marker instead.
    """
    workers = MAX_CONCURRENT if max_workers is None else max_workers
    # Pre-size the output so results land at their item's index even though
    # futures complete in arbitrary order.
    results = [None] * len(items)
    total = len(items)
    done = 0
    with concurrent.futures.ThreadPoolExecutor(max_workers=workers) as executor:
        future_to_idx = {
            executor.submit(doc_fn, filepath, content): i
            for i, (filepath, content) in enumerate(items)
        }
        for future in concurrent.futures.as_completed(future_to_idx):
            idx = future_to_idx[future]
            try:
                results[idx] = future.result()
            except Exception as e:
                # Record the failure in place so one bad item can't sink the batch.
                results[idx] = f"[doc generation failed: {e}]"
            done += 1
            if done % 10 == 0 or done == total:
                print(f" Generated {done}/{total} docs")
    return results
def _call_ollama(prompt: str, retries: int = 3) -> str:
    """Call Ollama's /api/generate endpoint, retrying with exponential backoff.

    Returns the model's response text, or an error marker string after
    exhausting all retries (never raises to the caller).
    """
    for attempt in range(retries):
        try:
            response = requests.post(
                f"{OLLAMA_URL}/api/generate",
                json={
                    "model": OLLAMA_MODEL,
                    "prompt": prompt,
                    "stream": False,  # single JSON body, not a token stream
                    "options": {"temperature": 0.3, "num_predict": 256},
                },
                timeout=120,
            )
            response.raise_for_status()
            return response.json()["response"].strip()
        except Exception as exc:
            if attempt == retries - 1:
                # Out of retries: degrade to an inline error marker.
                return f"[doc generation failed after {retries} attempts: {exc}]"
            # Back off 1s, 2s, 4s, ... before the next attempt.
            time.sleep(2 ** attempt)
def _call_openai(prompt: str, retries: int = 3) -> str:
    """Call an OpenAI-compatible chat API (Kiro gateway, OpenRouter, etc.).

    Returns the first choice's message content, or an error marker string
    after exhausting all retries (never raises to the caller).
    """
    for attempt in range(retries):
        try:
            response = requests.post(
                f"{OPENAI_URL}/v1/chat/completions",
                headers={"Authorization": f"Bearer {OPENAI_API_KEY}"},
                json={
                    "model": OPENAI_MODEL,
                    "messages": [{"role": "user", "content": prompt}],
                    "temperature": 0.3,
                    "max_tokens": 256,
                },
                timeout=120,
            )
            response.raise_for_status()
            return response.json()["choices"][0]["message"]["content"].strip()
        except Exception as exc:
            if attempt == retries - 1:
                # Out of retries: degrade to an inline error marker.
                return f"[doc generation failed after {retries} attempts: {exc}]"
            # Back off 1s, 2s, 4s, ... before the next attempt.
            time.sleep(2 ** attempt)
def _call_llm(prompt: str) -> str:
    """Dispatch the prompt to whichever backend LLM_BACKEND selects."""
    # Anything other than "openai" (including unknown values) falls back to Ollama.
    backend = _call_openai if LLM_BACKEND == "openai" else _call_ollama
    return backend(prompt)