Extensions

2026-01-26 16:36:13 +01:00
parent 64fb022c38
commit 234e75c66a
7 changed files with 250 additions and 39 deletions

View File

@@ -6,7 +6,7 @@
     <groupId>de.assecutor.votianlt</groupId>
     <artifactId>votianlt</artifactId>
-    <version>0.8.2</version>
+    <version>0.8.4</version>
     <packaging>jar</packaging>
@@ -118,6 +118,12 @@
         <artifactId>spring-boot-starter-mail</artifactId>
     </dependency>
+    <!-- Spring WebFlux for direct LLM API calls (like aimailassistant) -->
+    <dependency>
+        <groupId>org.springframework.boot</groupId>
+        <artifactId>spring-boot-starter-webflux</artifactId>
+    </dependency>
     <!-- Jackson JSR310 module for Java 8 date/time support -->
     <dependency>
         <groupId>com.fasterxml.jackson.datatype</groupId>
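
Editor's note: the webflux starter is pulled in only for WebClient, which the new LlmRestClient further down builds on. A minimal smoke test of the kind this enables might look like the following sketch; it is not part of the commit, and the base URL is just the default that LlmRestClient assumes.

import org.springframework.web.reactive.function.client.WebClient;
import java.time.Duration;

// Hypothetical smoke test: verify the webflux starter is wired up
// by listing the models LM Studio exposes.
public class ModelsProbe {
    public static void main(String[] args) {
        // Default base URL taken from LlmRestClient below; adjust as needed.
        WebClient client = WebClient.create("http://192.168.180.10:1234");
        String body = client.get()
                .uri("/v1/models")              // OpenAI-compatible model listing
                .retrieve()
                .bodyToMono(String.class)
                .timeout(Duration.ofSeconds(5)) // fail fast if LM Studio is down
                .block();                       // blocking is fine for a one-off probe
        System.out.println(body);
    }
}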

Binary file not shown.

View File

@@ -5,6 +5,10 @@ import lombok.extern.slf4j.Slf4j;
 import org.springframework.beans.factory.annotation.Value;
 import org.springframework.context.annotation.Configuration;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URL;

 /**
  * Configuration for LLM integration via LM Studio.
  * LM Studio provides an OpenAI-compatible API.
@@ -21,9 +25,71 @@ public class LlmConfig {
     @PostConstruct
     public void logConfig() {
-        log.info("LLM Configuration initialized:");
+        log.info("=== LLM Configuration ===");
         log.info("Base URL: {}", baseUrl);
         log.info("Model: {}", model);
+        testConnection();
+    }
+
+    private void testConnection() {
+        log.info("Testing LLM connection to: {}", baseUrl);
+        // Test 1: Basic connectivity
+        testEndpoint(baseUrl + "/v1/models", "GET", null);
+        // Test 2: Chat completions endpoint WITHOUT streaming (POST with minimal payload)
+        String testPayload = "{\"model\":\"" + model + "\",\"messages\":[{\"role\":\"user\",\"content\":\"ping\"}],\"max_tokens\":1,\"stream\":false}";
+        log.info("Test payload (stream=false): {}", testPayload);
+        testEndpoint(baseUrl + "/v1/chat/completions", "POST", testPayload);
+        // Test 3: Chat completions WITH streaming to compare behavior
+        String streamPayload = "{\"model\":\"" + model + "\",\"messages\":[{\"role\":\"user\",\"content\":\"ping\"}],\"max_tokens\":1,\"stream\":true}";
+        log.info("Test payload (stream=true): {}", streamPayload);
+        testEndpoint(baseUrl + "/v1/chat/completions", "POST", streamPayload);
+    }
+
+    private void testEndpoint(String endpoint, String method, String payload) {
+        try {
+            log.info("Testing endpoint: {} {}", method, endpoint);
+            URL url = URI.create(endpoint).toURL();
+            HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+            connection.setRequestMethod(method);
+            connection.setConnectTimeout(5000);
+            connection.setReadTimeout(10000);
+            if (payload != null) {
+                connection.setDoOutput(true);
+                connection.setRequestProperty("Content-Type", "application/json");
+                try (var os = connection.getOutputStream()) {
+                    os.write(payload.getBytes());
+                }
+            }
+            int responseCode = connection.getResponseCode();
+            String responseMessage = connection.getResponseMessage();
+            if (responseCode >= 200 && responseCode < 300) {
+                log.info(" -> SUCCESS (HTTP {} {})", responseCode, responseMessage);
+            } else {
+                // Read error body
+                String errorBody = "";
+                try (var is = connection.getErrorStream()) {
+                    if (is != null) {
+                        errorBody = new String(is.readAllBytes());
+                    }
+                }
+                log.warn(" -> HTTP {} {} - {}", responseCode, responseMessage, errorBody);
+            }
+            connection.disconnect();
+        } catch (java.net.ConnectException e) {
+            log.error(" -> FAILED - Connection refused: {}", e.getMessage());
+        } catch (java.net.SocketTimeoutException e) {
+            log.error(" -> FAILED - Timeout: {}", e.getMessage());
+        } catch (java.net.UnknownHostException e) {
+            log.error(" -> FAILED - Unknown host: {}", e.getMessage());
+        } catch (Exception e) {
+            log.error(" -> FAILED: {} - {}", e.getClass().getSimpleName(), e.getMessage());
+        }
     }

     public String getBaseUrl() {
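
Editor's note: one caveat in testConnection() above is that the test payload is assembled by string concatenation, which yields invalid JSON if the model name ever contains a quote or backslash. A safer variant would serialize via Jackson, which is already on the classpath. The helper below is a hypothetical sketch, not part of the commit.

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import java.util.List;
import java.util.Map;

// Hypothetical helper: builds the same ping payload with proper JSON
// escaping instead of string concatenation.
public class PingPayloads {
    static String buildPingPayload(String model, boolean stream) throws JsonProcessingException {
        return new ObjectMapper().writeValueAsString(Map.of(
                "model", model, // escaped safely even if the name contains quotes
                "messages", List.of(Map.of("role", "user", "content", "ping")),
                "max_tokens", 1,
                "stream", stream));
    }
}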

View File

@@ -5,8 +5,6 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import de.assecutor.votianlt.model.JobStatus;
 import de.assecutor.votianlt.service.JobStatisticsService;
 import lombok.extern.slf4j.Slf4j;
-import org.springframework.ai.chat.client.ChatClient;
-import org.springframework.ai.chat.model.ChatModel;
 import org.springframework.stereotype.Service;

 import java.time.Month;
@@ -17,21 +15,21 @@ import java.util.Map;
 /**
  * Service for AI-assisted statistics analysis with chart visualization.
- * Uses LM Studio via OpenAI-compatible API and local job statistics.
+ * Uses LM Studio via direct REST client (like aimailassistant) instead of Spring AI.
  */
 @Service
 @Slf4j
 public class AiStatisticsService {

-    private final ChatClient chatClient;
+    private final LlmRestClient llmClient;
     private final JobStatisticsService statisticsService;
     private final ObjectMapper objectMapper;

-    public AiStatisticsService(ChatModel chatModel, JobStatisticsService statisticsService) {
-        this.chatClient = ChatClient.builder(chatModel).build();
+    public AiStatisticsService(LlmRestClient llmClient, JobStatisticsService statisticsService) {
+        this.llmClient = llmClient;
         this.statisticsService = statisticsService;
         this.objectMapper = new ObjectMapper();
-        log.info("AiStatisticsService initialized");
+        log.info("AiStatisticsService initialized with direct REST client");
     }

     /**
@@ -54,28 +52,35 @@
         // Determine query type and prepare chart data
         QueryAnalysis analysis = analyzeQueryType(userQuery);
+        log.debug("Query analysis - Type: {}, Chart: {}", analysis.queryType, analysis.chartType);

         // Build prompt for LLM
         String prompt = buildPrompt(userQuery, statisticsContext, analysis);

-        try {
-            // Get LLM response
-            String llmResponse = chatClient.prompt()
-                    .user(prompt)
-                    .call()
-                    .content();
-            log.info("LLM response received");
-
-            // Build chart data based on query type
-            String chartType = analysis.chartType;
-            String chartData = analysis.chartData;
-
-            return new StatisticsResponse(llmResponse, chartType, chartData);
-        } catch (Exception e) {
-            log.error("Error calling LLM: {}", e.getMessage(), e);
-            // Fallback: Return statistics without LLM analysis
+        // System prompt for the statistics assistant
+        String systemPrompt = """
+                Du bist ein hilfreicher Statistik-Assistent für ein Logistikunternehmen.
+                Beantworte die Frage des Benutzers basierend auf den aktuellen Statistiken.
+
+                WICHTIGE FORMATIERUNGSREGELN:
+                - Verwende KEINE Tabellen (keine | oder --- Zeichen)
+                - Die Daten werden bereits als interaktives Diagramm visualisiert
+                - Fasse die wichtigsten Erkenntnisse in Fließtext oder kurzen Aufzählungen zusammen
+                - Nenne konkrete Zahlen im Text, aber liste nicht alle Werte tabellarisch auf
+
+                Antworte auf Deutsch, präzise und freundlich.
+                Erkläre die Daten kurz und gib bei Bedarf Empfehlungen.
+                Halte die Antwort kompakt (max. 3-4 Sätze für einfache Fragen, mehr für komplexe).
+                """;
+
+        // Call LLM via direct REST client (like aimailassistant)
+        String llmResponse = llmClient.chat(systemPrompt, prompt);
+
+        if (llmResponse != null) {
+            log.info("LLM response received, length: {} chars", llmResponse.length());
+            return new StatisticsResponse(llmResponse, analysis.chartType, analysis.chartData);
+        } else {
+            log.warn("LLM returned null response, using fallback");
             return new StatisticsResponse(
                 buildFallbackResponse(analysis),
                 analysis.chartType,
@@ -305,23 +310,11 @@ public class AiStatisticsService {
     }

     private String buildPrompt(String userQuery, String statisticsContext, QueryAnalysis analysis) {
+        // User prompt contains only the context and question (system prompt is passed separately)
         return String.format("""
-                Du bist ein hilfreicher Statistik-Assistent für ein Logistikunternehmen.
-                Beantworte die Frage des Benutzers basierend auf den aktuellen Statistiken.
-
                 %s

                 **Benutzerfrage:** %s
-
-                WICHTIGE FORMATIERUNGSREGELN:
-                - Verwende KEINE Tabellen (keine | oder --- Zeichen)
-                - Die Daten werden bereits als interaktives Diagramm visualisiert
-                - Fasse die wichtigsten Erkenntnisse in Fließtext oder kurzen Aufzählungen zusammen
-                - Nenne konkrete Zahlen im Text, aber liste nicht alle Werte tabellarisch auf
-
-                Antworte auf Deutsch, präzise und freundlich.
-                Erkläre die Daten kurz und gib bei Bedarf Empfehlungen.
-                Halte die Antwort kompakt (max. 3-4 Sätze für einfache Fragen, mehr für komplexe).
                 """, statisticsContext, userQuery);
     }
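
Editor's note: the net effect of this hunk is that the formatting rules move out of the per-request user prompt and into the system prompt added above, so buildPrompt() now yields only context plus question. A toy illustration of the resulting role split (all values made up, not part of the commit):

import java.util.List;
import java.util.Map;

// Hypothetical illustration: after this change the formatting rules travel
// in the "system" role, the statistics only in the "user" role.
public class PromptSplitDemo {
    public static void main(String[] args) {
        String systemPrompt = "Du bist ein hilfreicher Statistik-Assistent ...";
        String userPrompt = """
                **Jobs im Januar:** 42 erfolgreich, 3 fehlgeschlagen

                **Benutzerfrage:** Wie lief der Januar?
                """;
        List<Map<String, String>> messages = List.of(
                Map.of("role", "system", "content", systemPrompt),
                Map.of("role", "user", "content", userPrompt));
        messages.forEach(m -> System.out.printf("%s: %s%n", m.get("role"), m.get("content")));
    }
}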

View File

@@ -0,0 +1,125 @@
package de.assecutor.votianlt.ai.service;

import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.MediaType;
import org.springframework.stereotype.Component;
import org.springframework.web.reactive.function.client.WebClient;

import java.time.Duration;
import java.util.List;
import java.util.Map;

/**
 * Direct REST client for the LM Studio API.
 * Uses Spring WebClient like aimailassistant - bypasses Spring AI.
 */
@Component
@Slf4j
public class LlmRestClient {

    private final WebClient webClient;
    private final ObjectMapper objectMapper;
    private final String model;

    public LlmRestClient(
            @Value("${spring.ai.openai.base-url:http://192.168.180.10:1234}") String baseUrl,
            @Value("${spring.ai.openai.chat.options.model:local-model}") String model,
            ObjectMapper objectMapper) {
        this.webClient = WebClient.builder()
                .baseUrl(baseUrl + "/v1/chat/completions")
                .build();
        this.model = model;
        this.objectMapper = objectMapper;
        log.info("LlmRestClient initialized - URL: {}/v1/chat/completions, Model: {}", baseUrl, model);
    }

    /**
     * Send a chat completion request to LM Studio.
     *
     * @param systemPrompt System prompt for context
     * @param userMessage  User message/question
     * @return LLM response text, or null on error
     */
    public String chat(String systemPrompt, String userMessage) {
        return chat(systemPrompt, userMessage, 0.7, 2000);
    }

    /**
     * Send a chat completion request to LM Studio with custom parameters.
     *
     * @param systemPrompt System prompt for context
     * @param userMessage  User message/question
     * @param temperature  Temperature for response randomness (0.0-1.0)
     * @param maxTokens    Maximum tokens in response
     * @return LLM response text, or null on error
     */
    public String chat(String systemPrompt, String userMessage, double temperature, int maxTokens) {
        try {
            Map<String, Object> request = Map.of(
                    "model", model,
                    "messages", List.of(
                            Map.of("role", "system", "content", systemPrompt != null ? systemPrompt : ""),
                            Map.of("role", "user", "content", userMessage)
                    ),
                    "temperature", temperature,
                    "max_tokens", maxTokens,
                    "stream", false // IMPORTANT: no streaming!
            );
            log.info("Sending request to LLM (model: {}, prompt length: {} chars)...",
                    model, userMessage.length());
            long startTime = System.currentTimeMillis();

            String response = webClient.post()
                    .contentType(MediaType.APPLICATION_JSON)
                    .bodyValue(request)
                    .retrieve()
                    .bodyToMono(String.class)
                    .timeout(Duration.ofSeconds(120))
                    .block();

            long duration = System.currentTimeMillis() - startTime;
            log.info("LLM response received in {}ms", duration);
            log.debug("Raw LLM response: {}", response);

            return extractContent(response);
        } catch (Exception e) {
            log.error("Error calling LLM API: {} - {}", e.getClass().getSimpleName(), e.getMessage());
            if (log.isDebugEnabled()) {
                log.debug("Full stack trace:", e);
            }
            return null;
        }
    }

    /**
     * Simple chat without system prompt.
     */
    public String chat(String userMessage) {
        return chat(null, userMessage);
    }

    private String extractContent(String response) {
        if (response == null) {
            return null;
        }
        try {
            JsonNode root = objectMapper.readTree(response);
            JsonNode choices = root.path("choices");
            if (choices.isArray() && !choices.isEmpty()) {
                return choices.get(0).path("message").path("content").asText();
            }
            log.warn("Unexpected response structure: {}", response);
            return null;
        } catch (Exception e) {
            log.error("Error parsing LLM response: {}", e.getMessage());
            return null;
        }
    }
}
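
Editor's note: for reference, a minimal caller of this class might look like the sketch below. It is hypothetical (not part of the commit) and assumes the same package as LlmRestClient; in practice Spring injects the client, as AiStatisticsService above shows.

import org.springframework.stereotype.Service;

// Hypothetical consumer demonstrating the chat(...) overload with custom parameters.
@Service
class ReportSummaryService {
    private final LlmRestClient llm;

    ReportSummaryService(LlmRestClient llm) { // constructor injection by Spring
        this.llm = llm;
    }

    String summarize(String text) {
        // Low temperature and a small token budget for short, stable output.
        String reply = llm.chat("Summarize the text in one sentence.", text, 0.2, 256);
        return reply != null ? reply : "(LLM unavailable)"; // chat() returns null on error
    }
}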

View File

@@ -15,3 +15,17 @@ logging.level.root=WARN
 logging.file.name=logs/votianlt-production.log
 logging.file.max-size=50MB
 logging.file.max-history=90
+
+# Debug logging for AI/LLM troubleshooting (can be disabled after debugging)
+logging.level.org.springframework.ai=DEBUG
+logging.level.org.springframework.web.client.RestTemplate=DEBUG
+logging.level.org.springframework.web.client.RestClient=DEBUG
+logging.level.org.apache.http=DEBUG
+logging.level.org.apache.http.wire=DEBUG
+logging.level.org.apache.http.headers=DEBUG
+
+# Java HTTP Client logging
+logging.level.jdk.httpclient=DEBUG
+logging.level.java.net.http=DEBUG
+
+# Spring HTTP logging
+logging.level.org.springframework.http.client=DEBUG
+logging.level.de.assecutor.votianlt.ai=DEBUG

View File

@@ -113,6 +113,13 @@ spring.ai.openai.api-key=not-used
 spring.ai.openai.chat.options.model=local-model
 spring.ai.openai.chat.options.temperature=0.7
+
+# IMPORTANT: disable streaming - LM Studio/Docker cannot handle streaming responses correctly
+spring.ai.openai.chat.options.stream=false
+# Timeouts for the OpenAI client
+spring.ai.openai.connect-timeout=10s
+spring.ai.openai.read-timeout=120s
+
 # ===========================================
 # MCP Server Configuration
 # ===========================================