# ===== 环境配置文件(.env) =====
# ===== 网关与后端端口 =====
GATEWAY_HOST=127.0.0.1
GATEWAY_PORT=8080
BACKEND_HOST=127.0.0.1
BACKEND_PORT=8081

# ===== 推理参数 =====
THINK_MODE=think-on
CTX_SIZE=16384
IMAGE_MIN_TOKENS=256
IMAGE_MAX_TOKENS=1024
MMPROJ_OFFLOAD=off

# ===== 文件系统只读范围 =====
# 留空时默认只允许读取项目目录;多个目录用分号分隔,例如 D:\docs;D:\projects
READONLY_FS_ROOTS=
# 单次读取上限(字节),默认 512KB
READONLY_FS_MAX_READ_BYTES=524288

# ===== 9B 模型路径(可不改,使用默认目录) =====
MODEL_PATH=.tmp/models/crossrepo/lmstudio-community__Qwen3.5-9B-GGUF/Qwen3.5-9B-Q4_K_M.gguf
MMPROJ_PATH=.tmp/models/crossrepo/lmstudio-community__Qwen3.5-9B-GGUF/mmproj-Qwen3.5-9B-BF16.gguf
# 如果要一键切到 Q8,可执行 .\install_q8.cmd,它会自动把上面两项改成 Q8

# ===== 安装阶段下载源(可选覆盖) =====
# LLAMA_WIN_CUDA_URL=
# MODEL_GGUF_URL=
# MODEL_MMPROJ_URL=
# MODEL_GGUF_SHA256=
# MODEL_MMPROJ_SHA256=