sam 2025-09-27 20:03:04 +08:00
parent d2009d0793
commit 6384d7426c
6 changed files with 221 additions and 0 deletions

LICENSE (new file, 27 lines)

@@ -0,0 +1,27 @@
LLM Quant Framework License v1.0
Copyright (c) 2025, Qiang
Permission is hereby granted, free of charge, to any individual obtaining a copy
of this software and associated documentation files (the "Software"), to use,
copy, modify, and distribute the Software for personal, non-commercial purposes,
subject to the following conditions:
1. Attribution. Any redistribution or derivative work must include a prominent
notice acknowledging the original source:
"Based on the LLM Quant Framework by Qiang" and provide a link to the
original repository if distributed electronically.
2. Commercial Use. Any use of the Software, in whole or in part, for commercial
purposes requires prior written permission and a separate licensing agreement
with the copyright holder. Commercial purposes include, but are not limited
to, selling, licensing, offering as a hosted service, or integrating into a
commercial product.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -54,6 +54,13 @@ export TUSHARE_TOKEN="<your-token>"
- Streamlit interactive front end
- Plotly visualization of quotes and indicators
- TuShare market and fundamentals data source
- Unified access to Ollama / OpenAI-compatible APIs via Requests
### LLM Configuration and Testing
- A local Ollama instance (`http://localhost:11434`) is used by default; you can switch to an OpenAI-compatible API on the Streamlit “数据与设置” (Data & Settings) tab.
- After changing the provider, model, base URL, or API key, click “保存 LLM 设置” (Save LLM Settings); the updated settings only take effect for the current session.
- The “自检测试” (Self-Test) page adds an “LLM 接口测试” (LLM API Test) panel: enter a prompt to quickly verify the call, and rate-limit and error details are logged for troubleshooting. A minimal sketch of the same call path follows below.
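A minimal sketch of what the test panel exercises, assuming the repository root is on `PYTHONPATH` and a local Ollama server is already running; it relies only on `run_llm` and `LLMError` from `app/llm/client.py`:

```python
from app.llm.client import LLMError, run_llm

try:
    # Uses whatever is currently held in get_config().llm
    # (defaults: provider="ollama", model="llama3", base http://localhost:11434).
    reply = run_llm(
        "请概述今天的市场重点。",
        system="你是一名量化策略研究助手,用简洁中文回答。",
    )
    print(reply)
except LLMError as exc:
    # Raised on non-200 responses, a missing API key, or an unparsable payload.
    print(f"LLM call failed: {exc}")
```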
## Quick Start
@@ -77,3 +84,7 @@ The Streamlit `自检测试` (Self-Test) tab provides:
2. Write agent utilities into the SQLite `agent_utils` and `alloc_log` tables to drive the UI's decision explanations.
3. Populate `news`, `heat_daily`, and the hot-topic index using lightweight sentiment analysis and heat scoring.
4. Hook up a small local model or an API to produce human-readable strategy suggestion cards for an end-to-end experience (a sketch follows below).
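As a rough illustration of step 4, the sketch below feeds a hand-written factor snapshot into `run_llm` to draft a suggestion card. The field names in `snapshot` are hypothetical placeholders; the repo's actual card generation goes through `app.llm.explain.make_human_card`, whose interface is not shown in this diff.

```python
from app.llm.client import run_llm

# Hypothetical inputs; the real pipeline would read these from SQLite
# (e.g. the agent_utils / alloc_log tables) instead of hard-coding them.
snapshot = {"ts_code": "600519.SH", "momentum": 0.82, "value": 0.35, "alloc": 0.15}

prompt = (
    "根据以下因子打分生成一张简短的策略建议卡片,"
    "包含观点、理由与风险提示:\n"
    f"{snapshot}"
)
card = run_llm(prompt, system="你是一名量化策略研究助手,用简洁中文回答。")
print(card)
```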
## License
This project is released under the custom “LLM Quant Framework License v1.0”: personal use, modification, and distribution must retain attribution, and any commercial use requires prior negotiation with the copyright holder and a paid licensing agreement. See the `LICENSE` file in the repository root for details.

app/llm/client.py (new file, 122 lines)

@@ -0,0 +1,122 @@
"""Unified LLM client supporting Ollama and OpenAI compatible APIs."""
from __future__ import annotations
import json
from dataclasses import asdict
from typing import Dict, List, Optional
import requests
from app.utils.config import get_config
from app.utils.logging import get_logger
LOGGER = get_logger(__name__)
LOG_EXTRA = {"stage": "llm"}
class LLMError(RuntimeError):
"""Raised when LLM provider returns an error response."""
def _default_base_url(provider: str) -> str:
if provider == "ollama":
return "http://localhost:11434"
return "https://api.openai.com"
def _build_messages(prompt: str, system: Optional[str] = None) -> List[Dict[str, str]]:
messages: List[Dict[str, str]] = []
if system:
messages.append({"role": "system", "content": system})
messages.append({"role": "user", "content": prompt})
return messages
def _request_ollama(model: str, prompt: str, *, base_url: str, temperature: float, timeout: float, system: Optional[str]) -> str:
url = f"{base_url.rstrip('/')}/api/chat"
payload = {
"model": model,
"messages": _build_messages(prompt, system),
"stream": False,
"options": {"temperature": temperature},
}
LOGGER.debug("调用 Ollama: %s %s", model, url, extra=LOG_EXTRA)
response = requests.post(url, json=payload, timeout=timeout)
if response.status_code != 200:
raise LLMError(f"Ollama 调用失败: {response.status_code} {response.text}")
data = response.json()
message = data.get("message", {})
content = message.get("content", "")
if isinstance(content, list):
return "".join(chunk.get("text", "") or chunk.get("content", "") for chunk in content)
return str(content)
def _request_openai(model: str, prompt: str, *, base_url: str, api_key: str, temperature: float, timeout: float, system: Optional[str]) -> str:
url = f"{base_url.rstrip('/')}/v1/chat/completions"
headers = {
"Authorization": f"Bearer {api_key}",
"Content-Type": "application/json",
}
payload = {
"model": model,
"messages": _build_messages(prompt, system),
"temperature": temperature,
}
LOGGER.debug("调用 OpenAI 兼容接口: %s %s", model, url, extra=LOG_EXTRA)
response = requests.post(url, headers=headers, json=payload, timeout=timeout)
if response.status_code != 200:
raise LLMError(f"OpenAI API 调用失败: {response.status_code} {response.text}")
data = response.json()
try:
return data["choices"][0]["message"]["content"].strip()
except (KeyError, IndexError) as exc:
raise LLMError(f"OpenAI 响应解析失败: {json.dumps(data, ensure_ascii=False)}") from exc
def run_llm(prompt: str, *, system: Optional[str] = None) -> str:
"""Execute the configured LLM provider with the given prompt."""
cfg = get_config().llm
provider = (cfg.provider or "ollama").lower()
base_url = cfg.base_url or _default_base_url(provider)
model = cfg.model
temperature = max(0.0, min(cfg.temperature, 2.0))
timeout = max(5.0, cfg.timeout or 30.0)
LOGGER.info(
"触发 LLM 请求provider=%s model=%s base=%s", provider, model, base_url, extra=LOG_EXTRA
)
if provider == "openai":
if not cfg.api_key:
raise LLMError("缺少 OpenAI 兼容 API Key")
return _request_openai(
model,
prompt,
base_url=base_url,
api_key=cfg.api_key,
temperature=temperature,
timeout=timeout,
system=system,
)
if provider == "ollama":
return _request_ollama(
model,
prompt,
base_url=base_url,
temperature=temperature,
timeout=timeout,
system=system,
)
raise LLMError(f"不支持的 LLM provider: {cfg.provider}")
def llm_config_snapshot() -> Dict[str, object]:
"""Return a sanitized snapshot of current LLM configuration for debugging."""
cfg = get_config().llm
data = asdict(cfg)
if data.get("api_key"):
data["api_key"] = "***"
return data
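For reference, a sketch of driving the OpenAI-compatible path outside Streamlit by mutating the shared config, the same pattern `render_settings` uses. It assumes a reachable OpenAI-compatible endpoint; the model name and the `OPENAI_API_KEY` environment variable are the caller's own choices, not something this module reads:

```python
import os

from app.llm.client import llm_config_snapshot, run_llm
from app.utils.config import get_config

cfg = get_config().llm
cfg.provider = "openai"      # run_llm() routes this to /v1/chat/completions
cfg.model = "gpt-4o-mini"    # hypothetical; use whatever your endpoint serves
cfg.base_url = None          # None falls back to https://api.openai.com
cfg.api_key = os.environ.get("OPENAI_API_KEY")  # run_llm raises LLMError if missing

print(llm_config_snapshot())  # api_key, when set, is shown masked as "***"
print(run_llm("Summarize today's market in one sentence."))
```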

View File

@@ -18,6 +18,7 @@ from app.backtest.engine import BtConfig, run_backtest
from app.data.schema import initialize_database
from app.ingest.checker import run_boot_check
from app.ingest.tushare import FetchJob, run_ingestion
from app.llm.client import llm_config_snapshot, run_llm
from app.llm.explain import make_human_card
from app.utils.config import get_config
from app.utils.db import db_session
@@ -190,6 +191,32 @@ def render_settings() -> None:
st.write("新闻源开关与数据库备份将在此配置。")
st.divider()
st.subheader("LLM 设置")
llm_cfg = cfg.llm
providers = ["ollama", "openai"]
try:
provider_index = providers.index((llm_cfg.provider or "ollama").lower())
except ValueError:
provider_index = 0
selected_provider = st.selectbox("LLM Provider", providers, index=provider_index)
llm_model = st.text_input("LLM 模型", value=llm_cfg.model)
llm_base = st.text_input("LLM Base URL (可选)", value=llm_cfg.base_url or "")
llm_api_key = st.text_input("LLM API Key (OpenAI 类需要)", value=llm_cfg.api_key or "", type="password")
llm_temperature = st.slider("LLM 温度", min_value=0.0, max_value=2.0, value=float(llm_cfg.temperature), step=0.05)
llm_timeout = st.number_input("请求超时时间 (秒)", min_value=5.0, max_value=120.0, value=float(llm_cfg.timeout), step=5.0)
if st.button("保存 LLM 设置"):
llm_cfg.provider = selected_provider
llm_cfg.model = llm_model.strip() or llm_cfg.model
llm_cfg.base_url = llm_base.strip() or None
llm_cfg.api_key = llm_api_key.strip() or None
llm_cfg.temperature = llm_temperature
llm_cfg.timeout = llm_timeout
LOGGER.info("LLM 配置已更新:%s", llm_config_snapshot(), extra=LOG_EXTRA)
st.success("LLM 设置已保存,仅在当前会话生效。")
st.json(llm_config_snapshot())
def render_tests() -> None:
LOGGER.info("渲染自检页面", extra=LOG_EXTRA)
@@ -397,6 +424,27 @@ def render_tests() -> None:
st.dataframe(df_reset.tail(20), width='stretch')
LOGGER.info("行情可视化完成,展示行数=%s", len(df_reset), extra=LOG_EXTRA)
st.divider()
st.subheader("LLM 接口测试")
st.json(llm_config_snapshot())
llm_prompt = st.text_area("测试 Prompt", value="请概述今天的市场重点。", height=160)
system_prompt = st.text_area(
"System Prompt (可选)",
value="你是一名量化策略研究助手,用简洁中文回答。",
height=100,
)
if st.button("执行 LLM 测试"):
with st.spinner("正在调用 LLM..."):
try:
response = run_llm(llm_prompt, system=system_prompt or None)
except Exception as exc: # noqa: BLE001
LOGGER.exception("LLM 测试失败", extra=LOG_EXTRA)
st.error(f"LLM 调用失败:{exc}")
else:
LOGGER.info("LLM 测试成功", extra=LOG_EXTRA)
st.success("LLM 调用成功,以下为返回内容:")
st.write(response)
def main() -> None:
LOGGER.info("初始化 Streamlit UI", extra=LOG_EXTRA)

View File

@@ -44,6 +44,17 @@ class AgentWeights:
"A_macro": self.macro,
}
@dataclass
class LLMConfig:
"""Configuration for LLM providers (Ollama / OpenAI-compatible)."""
provider: str = "ollama" # Options: "ollama", "openai"
model: str = "llama3"
base_url: Optional[str] = None # Defaults resolved per provider
api_key: Optional[str] = None
temperature: float = 0.2
timeout: float = 30.0
@dataclass
class AppConfig:
@@ -55,6 +66,7 @@ class AppConfig:
data_paths: DataPaths = field(default_factory=DataPaths)
agent_weights: AgentWeights = field(default_factory=AgentWeights)
force_refresh: bool = False
llm: LLMConfig = field(default_factory=LLMConfig)
CONFIG = AppConfig()

View File

@@ -2,3 +2,4 @@ pandas>=2.0
plotly>=5.18
streamlit>=1.30
tushare>=1.2
requests>=2.31