Erweiterungen
This commit is contained in:
@@ -14,4 +14,18 @@ logging.level.de.assecutor.votianlt=INFO
|
||||
logging.level.root=WARN
|
||||
logging.file.name=logs/votianlt-production.log
|
||||
logging.file.max-size=50MB
|
||||
logging.file.max-history=90
|
||||
|
||||
# Debug logging for AI/LLM troubleshooting (can be disabled after debugging)
|
||||
logging.level.org.springframework.ai=DEBUG
|
||||
logging.level.org.springframework.web.client.RestTemplate=DEBUG
|
||||
logging.level.org.springframework.web.client.RestClient=DEBUG
|
||||
logging.level.org.apache.http=DEBUG
|
||||
logging.level.org.apache.http.wire=DEBUG
|
||||
logging.level.org.apache.http.headers=DEBUG
|
||||
# Java HTTP Client logging
|
||||
logging.level.jdk.httpclient=DEBUG
|
||||
logging.level.java.net.http=DEBUG
|
||||
# Spring HTTP logging
|
||||
logging.level.org.springframework.http.client=DEBUG
|
||||
logging.level.de.assecutor.votianlt.ai=DEBUG
|
||||
@@ -113,6 +113,13 @@ spring.ai.openai.api-key=not-used
|
||||
spring.ai.openai.chat.options.model=local-model
|
||||
spring.ai.openai.chat.options.temperature=0.7
|
||||
|
||||
# WICHTIG: Streaming deaktivieren - LM Studio/Docker können Streaming-Responses nicht korrekt handlen
|
||||
spring.ai.openai.chat.options.stream=false
|
||||
|
||||
# Timeouts für OpenAI Client
|
||||
spring.ai.openai.connect-timeout=10s
|
||||
spring.ai.openai.read-timeout=120s
|
||||
|
||||
# ===========================================
|
||||
# MCP Server Configuration
|
||||
# ===========================================
|
||||
|
||||
Reference in New Issue
Block a user