# hudi-service/service-ai/service-ai-web/src/main/resources/application.yml
---
server:
  compression:
    # Enable gzip compression of HTTP responses
    enabled: true
spring:
  application:
    name: service-ai-web
  profiles:
    include: random-port,common,discovery,metrics,forest
  mvc:
    async:
      # 1 hour (ms) — long-running async/streaming AI responses
      request-timeout: 3600000
  autoconfigure:
    # Exclude Spring AI auto-configurations; model clients are wired manually.
    # YAML list form binds directly to the List property (safer than a
    # comma-separated block scalar with embedded newlines).
    exclude:
      - org.springframework.ai.model.openai.autoconfigure.OpenAiChatAutoConfiguration
      - org.springframework.ai.model.openai.autoconfigure.OpenAiAudioSpeechAutoConfiguration
      - org.springframework.ai.model.openai.autoconfigure.OpenAiAudioTranscriptionAutoConfiguration
      - org.springframework.ai.model.openai.autoconfigure.OpenAiImageAutoConfiguration
      - org.springframework.ai.model.openai.autoconfigure.OpenAiEmbeddingAutoConfiguration
      - org.springframework.ai.model.openai.autoconfigure.OpenAiModerationAutoConfiguration
      - org.springframework.ai.model.deepseek.autoconfigure.DeepSeekChatAutoConfiguration
  ai:
    vectorstore:
      qdrant:
        host: 132.121.206.65
        port: 29463
        # Jasypt-encrypted value; decrypted at startup
        api-key: ENC(0/0UkIKeAvyV17yNqSU3v04wmm8CdWKe4BYSSJa2FuBtK12TcZRJPdwk+ZpYnpISv+KmVTUrrmFBzAYrDR3ysA==)
  # Custom LLM endpoint properties (referenced below via ${spring.llm.base-url})
  llm:
    base-url: http://132.121.206.65:10086
    api-key: ENC(K+Hff9QGC+fcyi510VIDd9CaeK/IN5WBJ9rlkUsHEdDgIidW+stHHJlsK0lLPUXXREha+ToQZqqDXJrqSE+GUKCXklFhelD8bRHFXBIeP/ZzT2cxhzgKUXgjw3S0Qw2R)
    chat:
      base-url: ${spring.llm.base-url}/v1
      model: 'Qwen3/qwen3-1.7b'
    visual:
      model: 'Qwen2.5/qwen2.5-vl-7b-q4km'
    embedding:
      model: 'Qwen3/qwen3-embedding-4b'
    reranker:
      # NOTE(review): 'beg-reranker-v2' looks like a typo for 'bge-reranker-v2'
      # — confirm against the model server before changing.
      model: 'BGE/beg-reranker-v2'
  jpa:
    # NOTE(review): show-sql is verbose; consider disabling outside dev.
    show-sql: true
    generate-ddl: false
liteflow:
  # LiteFlow orchestration rules loaded from the classpath
  rule-source: liteflow.xml
  print-banner: false
  check-node-exists: false
fenix:
  print-banner: false