You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

318 lines
10 KiB

11 months ago
  1. SERVER_PORT=3001
  2. STORAGE_DIR="/app/server/storage"
  3. UID='1000'
  4. GID='1000'
  5. # SIG_KEY='passphrase' # Please generate random string at least 32 chars long.
  6. # SIG_SALT='salt' # Please generate random string at least 32 chars long.
  7. # JWT_SECRET="my-random-string-for-seeding" # Only needed if AUTH_TOKEN is set. Please generate random string at least 12 chars long.
  8. ###########################################
  9. ######## LLM API SELECTION ################
  10. ###########################################
  11. # LLM_PROVIDER='openai'
  12. # OPEN_AI_KEY=
  13. # OPEN_MODEL_PREF='gpt-4o'
  14. # LLM_PROVIDER='gemini'
  15. # GEMINI_API_KEY=
  16. # GEMINI_LLM_MODEL_PREF='gemini-pro'
  17. # LLM_PROVIDER='azure'
  18. # AZURE_OPENAI_ENDPOINT=
  19. # AZURE_OPENAI_KEY=
  20. # OPEN_MODEL_PREF='my-gpt35-deployment' # This is the "deployment" on Azure you want to use. Not the base model.
  21. # EMBEDDING_MODEL_PREF='embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
  22. # LLM_PROVIDER='anthropic'
  23. # ANTHROPIC_API_KEY=sk-ant-xxxx
  24. # ANTHROPIC_MODEL_PREF='claude-2'
  25. # LLM_PROVIDER='lmstudio'
  26. # LMSTUDIO_BASE_PATH='http://your-server:1234/v1'
  27. # LMSTUDIO_MODEL_PREF='Loaded from Chat UI' # this is a bug in LMStudio 0.2.17
  28. # LMSTUDIO_MODEL_TOKEN_LIMIT=4096
  29. # LLM_PROVIDER='localai'
  30. # LOCAL_AI_BASE_PATH='http://host.docker.internal:8080/v1'
  31. # LOCAL_AI_MODEL_PREF='luna-ai-llama2'
  32. # LOCAL_AI_MODEL_TOKEN_LIMIT=4096
  33. # LOCAL_AI_API_KEY="sk-123abc"
  34. # LLM_PROVIDER='ollama'
  35. # OLLAMA_BASE_PATH='http://host.docker.internal:11434'
  36. # OLLAMA_MODEL_PREF='llama2'
  37. # OLLAMA_MODEL_TOKEN_LIMIT=4096
  38. # LLM_PROVIDER='togetherai'
  39. # TOGETHER_AI_API_KEY='my-together-ai-key'
  40. # TOGETHER_AI_MODEL_PREF='mistralai/Mixtral-8x7B-Instruct-v0.1'
  41. # LLM_PROVIDER='mistral'
  42. # MISTRAL_API_KEY='example-mistral-ai-api-key'
  43. # MISTRAL_MODEL_PREF='mistral-tiny'
  44. # LLM_PROVIDER='perplexity'
  45. # PERPLEXITY_API_KEY='my-perplexity-key'
  46. # PERPLEXITY_MODEL_PREF='codellama-34b-instruct'
  47. # LLM_PROVIDER='openrouter'
  48. # OPENROUTER_API_KEY='my-openrouter-key'
  49. # OPENROUTER_MODEL_PREF='openrouter/auto'
  50. # LLM_PROVIDER='huggingface'
  51. # HUGGING_FACE_LLM_ENDPOINT=https://uuid-here.us-east-1.aws.endpoints.huggingface.cloud
  52. # HUGGING_FACE_LLM_API_KEY=hf_xxxxxx
  53. # HUGGING_FACE_LLM_TOKEN_LIMIT=8000
  54. # LLM_PROVIDER='groq'
  55. # GROQ_API_KEY=gsk_abcxyz
  56. # GROQ_MODEL_PREF=llama3-8b-8192
  57. # LLM_PROVIDER='koboldcpp'
  58. # KOBOLD_CPP_BASE_PATH='http://127.0.0.1:5000/v1'
  59. # KOBOLD_CPP_MODEL_PREF='koboldcpp/codellama-7b-instruct.Q4_K_S'
  60. # KOBOLD_CPP_MODEL_TOKEN_LIMIT=4096
  61. # LLM_PROVIDER='textgenwebui'
  62. # TEXT_GEN_WEB_UI_BASE_PATH='http://127.0.0.1:5000/v1'
  63. # TEXT_GEN_WEB_UI_TOKEN_LIMIT=4096
  64. # TEXT_GEN_WEB_UI_API_KEY='sk-123abc'
  65. # LLM_PROVIDER='generic-openai'
  66. # GENERIC_OPEN_AI_BASE_PATH='http://proxy.url.openai.com/v1'
  67. # GENERIC_OPEN_AI_MODEL_PREF='gpt-3.5-turbo'
  68. # GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT=4096
  69. # GENERIC_OPEN_AI_API_KEY=sk-123abc
  70. # LLM_PROVIDER='litellm'
  71. # LITE_LLM_MODEL_PREF='gpt-3.5-turbo'
  72. # LITE_LLM_MODEL_TOKEN_LIMIT=4096
  73. # LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
  74. # LITE_LLM_API_KEY='sk-123abc'
  75. # LLM_PROVIDER='novita'
  76. # NOVITA_LLM_API_KEY='your-novita-api-key-here' # Get your API key at https://novita.ai/settings/key-management
  77. # NOVITA_LLM_MODEL_PREF='deepseek/deepseek-r1'
  78. # LLM_PROVIDER='cohere'
  79. # COHERE_API_KEY=
  80. # COHERE_MODEL_PREF='command-r'
  81. # LLM_PROVIDER='bedrock'
  82. # AWS_BEDROCK_LLM_ACCESS_KEY_ID=
  83. # AWS_BEDROCK_LLM_ACCESS_KEY=
  84. # AWS_BEDROCK_LLM_REGION=us-west-2
  85. # AWS_BEDROCK_LLM_MODEL_PREFERENCE=meta.llama3-1-8b-instruct-v1:0
  86. # AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT=8191
  87. # LLM_PROVIDER='fireworksai'
  88. # FIREWORKS_AI_LLM_API_KEY='my-fireworks-ai-key'
  89. # FIREWORKS_AI_LLM_MODEL_PREF='accounts/fireworks/models/llama-v3p1-8b-instruct'
  90. # LLM_PROVIDER='apipie'
  91. # APIPIE_LLM_API_KEY='sk-123abc'
  92. # APIPIE_LLM_MODEL_PREF='openrouter/llama-3.1-8b-instruct'
  93. # LLM_PROVIDER='xai'
  94. # XAI_LLM_API_KEY='xai-your-api-key-here'
  95. # XAI_LLM_MODEL_PREF='grok-beta'
  96. # LLM_PROVIDER='nvidia-nim'
  97. # NVIDIA_NIM_LLM_BASE_PATH='http://127.0.0.1:8000'
  98. # NVIDIA_NIM_LLM_MODEL_PREF='meta/llama-3.2-3b-instruct'
  99. # LLM_PROVIDER='deepseek'
  100. # DEEPSEEK_API_KEY='your-deepseek-api-key-here'
  101. # DEEPSEEK_MODEL_PREF='deepseek-chat'
  102. ###########################################
  103. ######## Embedding API SELECTION ##########
  104. ###########################################
  105. # Only used if you are using an LLM that does not natively support embedding (openai or Azure)
  106. # EMBEDDING_ENGINE='openai'
  107. # OPEN_AI_KEY=sk-xxxx
  108. # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
  109. # EMBEDDING_ENGINE='azure'
  110. # AZURE_OPENAI_ENDPOINT=
  111. # AZURE_OPENAI_KEY=
  112. # EMBEDDING_MODEL_PREF='my-embedder-model' # This is the "deployment" on Azure you want to use for embeddings. Not the base model. Valid base model is text-embedding-ada-002
  113. # EMBEDDING_ENGINE='localai'
  114. # EMBEDDING_BASE_PATH='http://localhost:8080/v1'
  115. # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
  116. # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=1000 # The max chunk size in chars a string to embed can be
  117. # EMBEDDING_ENGINE='ollama'
  118. # EMBEDDING_BASE_PATH='http://host.docker.internal:11434'
  119. # EMBEDDING_MODEL_PREF='nomic-embed-text:latest'
  120. # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
  121. # EMBEDDING_ENGINE='lmstudio'
  122. # EMBEDDING_BASE_PATH='https://host.docker.internal:1234/v1'
  123. # EMBEDDING_MODEL_PREF='nomic-ai/nomic-embed-text-v1.5-GGUF/nomic-embed-text-v1.5.Q4_0.gguf'
  124. # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
  125. # EMBEDDING_ENGINE='cohere'
  126. # COHERE_API_KEY=
  127. # EMBEDDING_MODEL_PREF='embed-english-v3.0'
  128. # EMBEDDING_ENGINE='voyageai'
  129. # VOYAGEAI_API_KEY=
  130. # EMBEDDING_MODEL_PREF='voyage-large-2-instruct'
  131. # EMBEDDING_ENGINE='litellm'
  132. # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
  133. # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
  134. # LITE_LLM_BASE_PATH='http://127.0.0.1:4000'
  135. # LITE_LLM_API_KEY='sk-123abc'
  136. # EMBEDDING_ENGINE='generic-openai'
  137. # EMBEDDING_MODEL_PREF='text-embedding-ada-002'
  138. # EMBEDDING_MODEL_MAX_CHUNK_LENGTH=8192
  139. # EMBEDDING_BASE_PATH='http://127.0.0.1:4000'
  140. # GENERIC_OPEN_AI_EMBEDDING_API_KEY='sk-123abc'
  141. # GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS=500
  142. # EMBEDDING_ENGINE='gemini'
  143. # GEMINI_EMBEDDING_API_KEY=
  144. # EMBEDDING_MODEL_PREF='text-embedding-004'
  145. ###########################################
  146. ######## Vector Database Selection ########
  147. ###########################################
  148. # Enable all below if you are using vector database: Chroma.
  149. # VECTOR_DB="chroma"
  150. # CHROMA_ENDPOINT='http://host.docker.internal:8000'
  151. # CHROMA_API_HEADER="X-Api-Key"
  152. # CHROMA_API_KEY="sk-123abc"
  153. # Enable all below if you are using vector database: Pinecone.
  154. # VECTOR_DB="pinecone"
  155. # PINECONE_API_KEY=
  156. # PINECONE_INDEX=
  157. # Enable all below if you are using vector database: LanceDB.
  158. # VECTOR_DB="lancedb"
  159. # Enable all below if you are using vector database: Weaviate.
  160. # VECTOR_DB="weaviate"
  161. # WEAVIATE_ENDPOINT="http://localhost:8080"
  162. # WEAVIATE_API_KEY=
  163. # Enable all below if you are using vector database: Qdrant.
  164. # VECTOR_DB="qdrant"
  165. # QDRANT_ENDPOINT="http://localhost:6333"
  166. # QDRANT_API_KEY=
  167. # Enable all below if you are using vector database: Milvus.
  168. # VECTOR_DB="milvus"
  169. # MILVUS_ADDRESS="http://localhost:19530"
  170. # MILVUS_USERNAME=
  171. # MILVUS_PASSWORD=
  172. # Enable all below if you are using vector database: Zilliz Cloud.
  173. # VECTOR_DB="zilliz"
  174. # ZILLIZ_ENDPOINT="https://sample.api.gcp-us-west1.zillizcloud.com"
  175. # ZILLIZ_API_TOKEN=api-token-here
  176. # Enable all below if you are using vector database: Astra DB.
  177. # VECTOR_DB="astra"
  178. # ASTRA_DB_APPLICATION_TOKEN=
  179. # ASTRA_DB_ENDPOINT=
  180. ###########################################
  181. ######## Audio Model Selection ############
  182. ###########################################
  183. # (default) use built-in whisper-small model.
  184. # WHISPER_PROVIDER="local"
  185. # use openai hosted whisper model.
  186. # WHISPER_PROVIDER="openai"
  187. # OPEN_AI_KEY=sk-xxxxxxxx
  188. ###########################################
  189. ######## TTS/STT Model Selection ##########
  190. ###########################################
  191. # TTS_PROVIDER="native"
  192. # TTS_PROVIDER="openai"
  193. # TTS_OPEN_AI_KEY=sk-example
  194. # TTS_OPEN_AI_VOICE_MODEL=nova
  195. # TTS_PROVIDER="generic-openai"
  196. # TTS_OPEN_AI_COMPATIBLE_KEY=sk-example
  197. # TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL=nova
  198. # TTS_OPEN_AI_COMPATIBLE_ENDPOINT="https://api.openai.com/v1"
  199. # TTS_PROVIDER="elevenlabs"
  200. # TTS_ELEVEN_LABS_KEY=
  201. # TTS_ELEVEN_LABS_VOICE_MODEL=21m00Tcm4TlvDq8ikWAM # Rachel
  202. # CLOUD DEPLOYMENT VARIABLES ONLY
  203. # AUTH_TOKEN="hunter2" # This is the password to your application if remote hosting.
  204. # DISABLE_TELEMETRY="false"
  205. ###########################################
  206. ######## PASSWORD COMPLEXITY ##############
  207. ###########################################
  208. # Enforce a password schema for your organization users.
  209. # Documentation on how to use https://github.com/kamronbatman/joi-password-complexity
  210. # Default is only 8 char minimum
  211. # PASSWORDMINCHAR=8
  212. # PASSWORDMAXCHAR=250
  213. # PASSWORDLOWERCASE=1
  214. # PASSWORDUPPERCASE=1
  215. # PASSWORDNUMERIC=1
  216. # PASSWORDSYMBOL=1
  217. # PASSWORDREQUIREMENTS=4
  218. ###########################################
  219. ######## ENABLE HTTPS SERVER ##############
  220. ###########################################
  221. # By enabling this and providing the path/filename for the key and cert,
  222. # the server will use HTTPS instead of HTTP.
  223. #ENABLE_HTTPS="true"
  224. #HTTPS_CERT_PATH="sslcert/cert.pem"
  225. #HTTPS_KEY_PATH="sslcert/key.pem"
  226. ###########################################
  227. ######## AGENT SERVICE KEYS ###############
  228. ###########################################
  229. #------ SEARCH ENGINES -------
  230. #=============================
  231. #------ Google Search -------- https://programmablesearchengine.google.com/controlpanel/create
  232. # AGENT_GSE_KEY=
  233. # AGENT_GSE_CTX=
  234. #------ SearchApi.io ----------- https://www.searchapi.io/
  235. # AGENT_SEARCHAPI_API_KEY=
  236. # AGENT_SEARCHAPI_ENGINE=google
  237. #------ Serper.dev ----------- https://serper.dev/
  238. # AGENT_SERPER_DEV_KEY=
  239. #------ Bing Search ----------- https://portal.azure.com/
  240. # AGENT_BING_SEARCH_API_KEY=
  241. #------ Serply.io ----------- https://serply.io/
  242. # AGENT_SERPLY_API_KEY=
  243. #------ SearXNG ----------- https://github.com/searxng/searxng
  244. # AGENT_SEARXNG_API_URL=
  245. #------ Tavily ----------- https://www.tavily.com/
  246. # AGENT_TAVILY_API_KEY=
  247. ###########################################
  248. ######## Other Configurations ############
  249. ###########################################
  250. # Disable viewing chat history from the UI and frontend APIs.
  251. # See https://docs.anythingllm.com/configuration#disable-view-chat-history for more information.
  252. # DISABLE_VIEW_CHAT_HISTORY=1
  253. # Enable simple SSO passthrough to pre-authenticate users from a third party service.
  254. # See https://docs.anythingllm.com/configuration#simple-sso-passthrough for more information.
  255. # SIMPLE_SSO_ENABLED=1