# Packaging notes:
# - parser.py renamed to go_parser.py (avoids clash with a Python builtin module name)
# - docgen.py was missing from the flat structure; now added
# - Added pyproject.toml for uv
# - Updated .mcp.json to use "uv run"
# - Updated README for the uv workflow
"""Ollama client for generating documentation."""
|
|
|
|
import requests
|
|
import os
|
|
import concurrent.futures
|
|
import time
|
|
|
|
# Base URL of the Ollama server; override with the OLLAMA_URL env var.
OLLAMA_URL = os.environ.get("OLLAMA_URL", "http://192.168.86.172:11434")

# Model name sent to /api/generate; override with the OLLAMA_MODEL env var.
OLLAMA_MODEL = os.environ.get("OLLAMA_MODEL", "qwen2.5:7b")

# Max simultaneous generation requests (thread-pool size in generate_docs_batch).
MAX_CONCURRENT = int(os.environ.get("MAX_CONCURRENT", "4"))
|
def generate_file_doc(filepath: str, content: str, max_chars: int = 8000) -> str:
    """Generate natural-language documentation for a single Go source file.

    Args:
        filepath: Path of the file, embedded in the prompt for context.
        content: Raw file contents. Truncated to ``max_chars`` characters so
            the prompt stays within the model's context window.
        max_chars: Truncation threshold (default 8000, the previous
            hard-coded limit, so existing callers are unaffected).

    Returns:
        The model's documentation text, or an error placeholder string if
        generation ultimately fails (see ``_call_ollama``).
    """
    if len(content) > max_chars:
        content = content[:max_chars] + "\n\n... [truncated]"

    prompt = f"""You are a senior software engineer documenting a Go codebase.

Describe what this file does in 2-4 sentences. Be specific about:
- The domain logic and purpose (not just "this file contains functions")
- Key types, interfaces, or structs defined
- How it fits into the larger system (if apparent from imports/naming)

Do NOT describe Go syntax or language mechanics. Describe WHAT the code does and WHY.

File: {filepath}

```go
{content}
```

Documentation:"""

    return _call_ollama(prompt)
|
def generate_relationship_doc(file_a: str, content_a: str, file_b: str, content_b: str) -> str:
    """Ask the model to describe how File A uses or depends on File B."""
    limit = 4000

    def clip(text: str) -> str:
        # Keep each file's snippet small so both fit in a single prompt.
        return text[:limit] + "\n... [truncated]" if len(text) > limit else text

    content_a = clip(content_a)
    content_b = clip(content_b)

    prompt = f"""You are a senior software engineer documenting how two files in a Go codebase interact.

Describe in 1-2 sentences how File A uses or depends on File B. Be specific about which types, functions, or interfaces are shared.

File A: {file_a}
```go
{content_a}
```

File B: {file_b}
```go
{content_b}
```

Relationship:"""

    return _call_ollama(prompt)
|
def generate_repo_doc(readme: str, entry_files: list[tuple[str, str]]) -> str:
    """Produce a repo-level overview from the README plus up to five entry files."""
    # Slicing is a no-op when the text is already short enough, so the
    # unconditional slice is equivalent to the length check it replaces.
    files_section = "".join(
        f"\n--- {path} ---\n{content[:2000]}\n"
        for path, content in entry_files[:5]
    )
    readme_section = readme[:3000]

    prompt = f"""You are a senior software engineer writing a project overview.

Based on the README and key source files below, write a 4-6 sentence summary of this project. Cover:
- What the project does (its purpose)
- Key architectural patterns (routing, middleware, etc.)
- The main abstractions and how they fit together

README:
{readme_section}

Key source files:
{files_section}

Project Overview:"""

    return _call_ollama(prompt)
|
def generate_docs_batch(items: list[tuple[str, str]], doc_fn) -> list[str]:
    """Run doc_fn over every (filepath, content) pair concurrently.

    Results come back in the same order as ``items``; a failed item yields
    a placeholder string rather than raising, so one bad file cannot abort
    the whole batch.
    """
    total = len(items)
    results = [None] * total  # filled by index as futures complete

    with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT) as pool:
        pending = {
            pool.submit(doc_fn, path, text): pos
            for pos, (path, text) in enumerate(items)
        }

        done = 0
        for fut in concurrent.futures.as_completed(pending):
            pos = pending[fut]
            try:
                results[pos] = fut.result()
            except Exception as e:
                results[pos] = f"[doc generation failed: {e}]"
            done += 1
            # Progress heartbeat every 10 completions and at the end.
            if done % 10 == 0 or done == total:
                print(f" Generated {done}/{total} docs")

    return results
|
def _call_ollama(prompt: str, retries: int = 3) -> str:
    """POST the prompt to Ollama's /api/generate and return the response text.

    Retries with exponential backoff (1s, 2s, 4s, ...) on any failure;
    after the final attempt a placeholder error string is returned instead
    of raising, so batch generation never aborts.
    """
    endpoint = f"{OLLAMA_URL}/api/generate"
    payload = {
        "model": OLLAMA_MODEL,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": 0.3,  # low temperature for consistent, factual docs
            "num_predict": 256,
        },
    }

    for attempt in range(retries):
        try:
            resp = requests.post(endpoint, json=payload, timeout=120)
            resp.raise_for_status()
            return resp.json()["response"].strip()
        except Exception as e:
            if attempt < retries - 1:
                time.sleep(2 ** attempt)
                continue
            return f"[doc generation failed after {retries} attempts: {e}]"