fix: increase max_tokens for repo-level docs to 1024
The new structured repo-level doc prompt (5 sections with headers) produces longer output than the old 4-6 sentence summary, so its completion budget grows to 1024 tokens. File and relationship docs stay at 256 tokens. max_tokens is plumbed through _call_llm → _call_ollama/_call_openai.
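For context, a minimal sketch of the plumbing after this change (retry loops dropped for brevity; OLLAMA_URL, OPENAI_URL, OPENAI_KEY, and the model names are illustrative stand-ins for the module's real configuration, not taken from the diff):

import requests

LLM_BACKEND = "ollama"  # or "openai"; stand-in for the module's real config
OLLAMA_URL = "http://localhost:11434/api/generate"         # Ollama's default endpoint
OPENAI_URL = "https://api.openai.com/v1/chat/completions"  # illustrative; the gateway URL is configurable
OPENAI_KEY = "sk-..."                                       # illustrative
OPENAI_MODEL = "gpt-4o-mini"                                # illustrative

def _call_ollama(prompt: str, max_tokens: int = 256) -> str:
    # Ollama caps generation length via the num_predict option.
    resp = requests.post(
        OLLAMA_URL,
        json={
            "model": "llama3.1",  # illustrative
            "prompt": prompt,
            "stream": False,
            "options": {"temperature": 0.3, "num_predict": max_tokens},
        },
        timeout=120,
    )
    return resp.json()["response"]

def _call_openai(prompt: str, max_tokens: int = 256) -> str:
    # OpenAI-compatible APIs cap generation length via max_tokens.
    resp = requests.post(
        OPENAI_URL,
        headers={"Authorization": f"Bearer {OPENAI_KEY}"},
        json={
            "model": OPENAI_MODEL,
            "messages": [{"role": "user", "content": prompt}],
            "temperature": 0.3,
            "max_tokens": max_tokens,
        },
        timeout=120,
    )
    return resp.json()["choices"][0]["message"]["content"]

def _call_llm(prompt: str, max_tokens: int = 256) -> str:
    # Route to the configured backend, forwarding the token budget.
    if LLM_BACKEND == "openai":
        return _call_openai(prompt, max_tokens=max_tokens)
    return _call_ollama(prompt, max_tokens=max_tokens)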
docgen.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
--- a/docgen.py
+++ b/docgen.py
@@ -121,7 +121,7 @@ Key source files (with their generated documentation):
 
 Project Overview:"""
 
-    return _call_llm(prompt)
+    return _call_llm(prompt, max_tokens=1024)
 
 
 def generate_docs_batch(items: list[tuple[str, str]], doc_fn) -> list[str]:
@@ -149,7 +149,7 @@ def generate_docs_batch(items: list[tuple[str, str]], doc_fn) -> list[str]:
     return results
 
 
-def _call_ollama(prompt: str, retries: int = 3) -> str:
+def _call_ollama(prompt: str, retries: int = 3, max_tokens: int = 256) -> str:
     """Call Ollama API with retries."""
     for attempt in range(retries):
         try:
@@ -161,7 +161,7 @@ def _call_ollama(prompt: str, retries: int = 3) -> str:
                     "stream": False,
                     "options": {
                         "temperature": 0.3,
-                        "num_predict": 256,
+                        "num_predict": max_tokens,
                     },
                 },
                 timeout=120,
@@ -175,7 +175,7 @@ def _call_ollama(prompt: str, retries: int = 3) -> str:
     return f"[doc generation failed after {retries} attempts: {e}]"
 
 
-def _call_openai(prompt: str, retries: int = 3) -> str:
+def _call_openai(prompt: str, retries: int = 3, max_tokens: int = 256) -> str:
     """Call OpenAI-compatible API (Kiro gateway, OpenRouter, etc.)."""
     for attempt in range(retries):
         try:
@@ -186,7 +186,7 @@ def _call_openai(prompt: str, retries: int = 3) -> str:
                     "model": OPENAI_MODEL,
                     "messages": [{"role": "user", "content": prompt}],
                     "temperature": 0.3,
-                    "max_tokens": 256,
+                    "max_tokens": max_tokens,
                 },
                 timeout=120,
             )
@@ -199,8 +199,8 @@ def _call_openai(prompt: str, retries: int = 3) -> str:
     return f"[doc generation failed after {retries} attempts: {e}]"
 
 
-def _call_llm(prompt: str) -> str:
+def _call_llm(prompt: str, max_tokens: int = 256) -> str:
     """Route to the configured backend."""
     if LLM_BACKEND == "openai":
-        return _call_openai(prompt)
-    return _call_ollama(prompt)
+        return _call_openai(prompt, max_tokens=max_tokens)
+    return _call_ollama(prompt, max_tokens=max_tokens)
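On the caller side, only the repo-level call site appears in the diff above; a file-level caller would simply keep the default budget. A hypothetical illustration (generate_repo_doc and generate_file_doc are placeholder names, not taken from docgen.py):

def generate_repo_doc(prompt: str) -> str:
    # Structured 5-section overview: needs the larger budget.
    return _call_llm(prompt, max_tokens=1024)

def generate_file_doc(prompt: str) -> str:
    # File and relationship docs keep the 256-token default.
    return _call_llm(prompt)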