const { resetAllVectorStores } = require("../vectorStore/resetAllVectorStores");
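
// KEY_MAPPING maps a public-facing settings key to the process.env variable it controls.
// Each entry has:
//   envKey:     the environment variable to write,
//   checks:     validators run as validator(value, forceModeEnabled); each returns null
//               when the value is acceptable or an error-message string when it is not,
//   postUpdate: (optional) async hooks run as hook(key, prevValue, nextValue) after the
//               new value has been written to process.env.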
const KEY_MAPPING = {
  LLMProvider: {
    envKey: "LLM_PROVIDER",
    checks: [isNotEmpty, supportedLLM],
  },
  // OpenAI Settings
  OpenAiKey: {
    envKey: "OPEN_AI_KEY",
    checks: [isNotEmpty, validOpenAIKey],
  },
  OpenAiModelPref: {
    envKey: "OPEN_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Azure OpenAI Settings
  AzureOpenAiEndpoint: {
    envKey: "AZURE_OPENAI_ENDPOINT",
    checks: [isNotEmpty],
  },
  AzureOpenAiTokenLimit: {
    envKey: "AZURE_OPENAI_TOKEN_LIMIT",
    checks: [validOpenAiTokenLimit],
  },
  AzureOpenAiKey: {
    envKey: "AZURE_OPENAI_KEY",
    checks: [isNotEmpty],
  },
  AzureOpenAiModelPref: {
    envKey: "OPEN_MODEL_PREF",
    checks: [isNotEmpty],
  },
  AzureOpenAiEmbeddingModelPref: {
    envKey: "EMBEDDING_MODEL_PREF",
    checks: [isNotEmpty],
  },
  AzureOpenAiModelType: {
    envKey: "AZURE_OPENAI_MODEL_TYPE",
    checks: [
      (input) =>
        ["default", "reasoning"].includes(input)
          ? null
          : "Invalid model type. Must be one of: default, reasoning.",
    ],
  },
  // Anthropic Settings
  AnthropicApiKey: {
    envKey: "ANTHROPIC_API_KEY",
    checks: [isNotEmpty, validAnthropicApiKey],
  },
  AnthropicModelPref: {
    envKey: "ANTHROPIC_MODEL_PREF",
    checks: [isNotEmpty, validAnthropicModel],
  },
  GeminiLLMApiKey: {
    envKey: "GEMINI_API_KEY",
    checks: [isNotEmpty],
  },
  GeminiLLMModelPref: {
    envKey: "GEMINI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  GeminiSafetySetting: {
    envKey: "GEMINI_SAFETY_SETTING",
    checks: [validGeminiSafetySetting],
  },
  // LMStudio Settings
  LMStudioBasePath: {
    envKey: "LMSTUDIO_BASE_PATH",
    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
  },
  LMStudioModelPref: {
    envKey: "LMSTUDIO_MODEL_PREF",
    checks: [],
  },
  LMStudioTokenLimit: {
    envKey: "LMSTUDIO_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  // LocalAI Settings
  LocalAiBasePath: {
    envKey: "LOCAL_AI_BASE_PATH",
    checks: [isNotEmpty, validLLMExternalBasePath, validDockerizedUrl],
  },
  LocalAiModelPref: {
    envKey: "LOCAL_AI_MODEL_PREF",
    checks: [],
  },
  LocalAiTokenLimit: {
    envKey: "LOCAL_AI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  LocalAiApiKey: {
    envKey: "LOCAL_AI_API_KEY",
    checks: [],
  },
  OllamaLLMBasePath: {
    envKey: "OLLAMA_BASE_PATH",
    checks: [isNotEmpty, validOllamaLLMBasePath, validDockerizedUrl],
  },
  OllamaLLMModelPref: {
    envKey: "OLLAMA_MODEL_PREF",
    checks: [],
  },
  OllamaLLMTokenLimit: {
    envKey: "OLLAMA_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  OllamaLLMPerformanceMode: {
    envKey: "OLLAMA_PERFORMANCE_MODE",
    checks: [],
  },
  OllamaLLMKeepAliveSeconds: {
    envKey: "OLLAMA_KEEP_ALIVE_TIMEOUT",
    checks: [isInteger],
  },
  // Mistral AI API Settings
  MistralApiKey: {
    envKey: "MISTRAL_API_KEY",
    checks: [isNotEmpty],
  },
  MistralModelPref: {
    envKey: "MISTRAL_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Hugging Face LLM Inference Settings
  HuggingFaceLLMEndpoint: {
    envKey: "HUGGING_FACE_LLM_ENDPOINT",
    checks: [isNotEmpty, isValidURL, validHuggingFaceEndpoint],
  },
  HuggingFaceLLMAccessToken: {
    envKey: "HUGGING_FACE_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  HuggingFaceLLMTokenLimit: {
    envKey: "HUGGING_FACE_LLM_TOKEN_LIMIT",
    checks: [nonZero],
  },
  // KoboldCPP Settings
  KoboldCPPBasePath: {
    envKey: "KOBOLD_CPP_BASE_PATH",
    checks: [isNotEmpty, isValidURL],
  },
  KoboldCPPModelPref: {
    envKey: "KOBOLD_CPP_MODEL_PREF",
    checks: [isNotEmpty],
  },
  KoboldCPPTokenLimit: {
    envKey: "KOBOLD_CPP_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  // Text Generation Web UI Settings
  TextGenWebUIBasePath: {
    envKey: "TEXT_GEN_WEB_UI_BASE_PATH",
    checks: [isValidURL],
  },
  TextGenWebUITokenLimit: {
    envKey: "TEXT_GEN_WEB_UI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  TextGenWebUIAPIKey: {
    envKey: "TEXT_GEN_WEB_UI_API_KEY",
    checks: [],
  },
  // LiteLLM Settings
  LiteLLMModelPref: {
    envKey: "LITE_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  LiteLLMTokenLimit: {
    envKey: "LITE_LLM_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  LiteLLMBasePath: {
    envKey: "LITE_LLM_BASE_PATH",
    checks: [isValidURL],
  },
  LiteLLMApiKey: {
    envKey: "LITE_LLM_API_KEY",
    checks: [],
  },
  // Generic OpenAI Inference Settings
  GenericOpenAiBasePath: {
    envKey: "GENERIC_OPEN_AI_BASE_PATH",
    checks: [isValidURL],
  },
  GenericOpenAiModelPref: {
    envKey: "GENERIC_OPEN_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  GenericOpenAiTokenLimit: {
    envKey: "GENERIC_OPEN_AI_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  GenericOpenAiKey: {
    envKey: "GENERIC_OPEN_AI_API_KEY",
    checks: [],
  },
  GenericOpenAiMaxTokens: {
    envKey: "GENERIC_OPEN_AI_MAX_TOKENS",
    checks: [nonZero],
  },
  // AWS Bedrock LLM Inference Settings
  AwsBedrockLLMConnectionMethod: {
    envKey: "AWS_BEDROCK_LLM_CONNECTION_METHOD",
    checks: [
      (input) =>
        ["iam", "sessionToken"].includes(input) ? null : "Invalid value",
    ],
  },
  AwsBedrockLLMAccessKeyId: {
    envKey: "AWS_BEDROCK_LLM_ACCESS_KEY_ID",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMAccessKey: {
    envKey: "AWS_BEDROCK_LLM_ACCESS_KEY",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMSessionToken: {
    envKey: "AWS_BEDROCK_LLM_SESSION_TOKEN",
    checks: [],
  },
  AwsBedrockLLMRegion: {
    envKey: "AWS_BEDROCK_LLM_REGION",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMModel: {
    envKey: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
    checks: [isNotEmpty],
  },
  AwsBedrockLLMTokenLimit: {
    envKey: "AWS_BEDROCK_LLM_MODEL_TOKEN_LIMIT",
    checks: [nonZero],
  },
  EmbeddingEngine: {
    envKey: "EMBEDDING_ENGINE",
    checks: [supportedEmbeddingModel],
    postUpdate: [handleVectorStoreReset],
  },
  EmbeddingBasePath: {
    envKey: "EMBEDDING_BASE_PATH",
    checks: [isNotEmpty, validDockerizedUrl],
  },
  EmbeddingModelPref: {
    envKey: "EMBEDDING_MODEL_PREF",
    checks: [isNotEmpty],
    postUpdate: [handleVectorStoreReset],
  },
  EmbeddingModelMaxChunkLength: {
    envKey: "EMBEDDING_MODEL_MAX_CHUNK_LENGTH",
    checks: [nonZero],
  },
  // Gemini Embedding Settings
  GeminiEmbeddingApiKey: {
    envKey: "GEMINI_EMBEDDING_API_KEY",
    checks: [isNotEmpty],
  },
  // Generic OpenAI Embedding Settings
  GenericOpenAiEmbeddingApiKey: {
    envKey: "GENERIC_OPEN_AI_EMBEDDING_API_KEY",
    checks: [],
  },
  GenericOpenAiEmbeddingMaxConcurrentChunks: {
    envKey: "GENERIC_OPEN_AI_EMBEDDING_MAX_CONCURRENT_CHUNKS",
    checks: [nonZero],
  },
  // Vector Database Selection Settings
  VectorDB: {
    envKey: "VECTOR_DB",
    checks: [isNotEmpty, supportedVectorDB],
    postUpdate: [handleVectorStoreReset],
  },
  // Chroma Options
  ChromaEndpoint: {
    envKey: "CHROMA_ENDPOINT",
    checks: [isValidURL, validChromaURL, validDockerizedUrl],
  },
  ChromaApiHeader: {
    envKey: "CHROMA_API_HEADER",
    checks: [],
  },
  ChromaApiKey: {
    envKey: "CHROMA_API_KEY",
    checks: [],
  },
  // Weaviate Options
  WeaviateEndpoint: {
    envKey: "WEAVIATE_ENDPOINT",
    checks: [isValidURL, validDockerizedUrl],
  },
  WeaviateApiKey: {
    envKey: "WEAVIATE_API_KEY",
    checks: [],
  },
  // QDrant Options
  QdrantEndpoint: {
    envKey: "QDRANT_ENDPOINT",
    checks: [isValidURL, validDockerizedUrl],
  },
  QdrantApiKey: {
    envKey: "QDRANT_API_KEY",
    checks: [],
  },
  PineConeKey: {
    envKey: "PINECONE_API_KEY",
    checks: [],
  },
  PineConeIndex: {
    envKey: "PINECONE_INDEX",
    checks: [],
  },
  // Milvus Options
  MilvusAddress: {
    envKey: "MILVUS_ADDRESS",
    checks: [isValidURL, validDockerizedUrl],
  },
  MilvusUsername: {
    envKey: "MILVUS_USERNAME",
    checks: [isNotEmpty],
  },
  MilvusPassword: {
    envKey: "MILVUS_PASSWORD",
    checks: [isNotEmpty],
  },
  // Zilliz Cloud Options
  ZillizEndpoint: {
    envKey: "ZILLIZ_ENDPOINT",
    checks: [isValidURL],
  },
  ZillizApiToken: {
    envKey: "ZILLIZ_API_TOKEN",
    checks: [isNotEmpty],
  },
  // Astra DB Options
  AstraDBApplicationToken: {
    envKey: "ASTRA_DB_APPLICATION_TOKEN",
    checks: [isNotEmpty],
  },
  AstraDBEndpoint: {
    envKey: "ASTRA_DB_ENDPOINT",
    checks: [isNotEmpty],
  },
  // Together AI Options
  TogetherAiApiKey: {
    envKey: "TOGETHER_AI_API_KEY",
    checks: [isNotEmpty],
  },
  TogetherAiModelPref: {
    envKey: "TOGETHER_AI_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Fireworks AI Options
  FireworksAiLLMApiKey: {
    envKey: "FIREWORKS_AI_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  FireworksAiLLMModelPref: {
    envKey: "FIREWORKS_AI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Perplexity Options
  PerplexityApiKey: {
    envKey: "PERPLEXITY_API_KEY",
    checks: [isNotEmpty],
  },
  PerplexityModelPref: {
    envKey: "PERPLEXITY_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // OpenRouter Options
  OpenRouterApiKey: {
    envKey: "OPENROUTER_API_KEY",
    checks: [isNotEmpty],
  },
  OpenRouterModelPref: {
    envKey: "OPENROUTER_MODEL_PREF",
    checks: [isNotEmpty],
  },
  OpenRouterTimeout: {
    envKey: "OPENROUTER_TIMEOUT_MS",
    checks: [],
  },
  // Novita Options
  NovitaLLMApiKey: {
    envKey: "NOVITA_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  NovitaLLMModelPref: {
    envKey: "NOVITA_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  NovitaLLMTimeout: {
    envKey: "NOVITA_LLM_TIMEOUT_MS",
    checks: [],
  },
  // Groq Options
  GroqApiKey: {
    envKey: "GROQ_API_KEY",
    checks: [isNotEmpty],
  },
  GroqModelPref: {
    envKey: "GROQ_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Cohere Options
  CohereApiKey: {
    envKey: "COHERE_API_KEY",
    checks: [isNotEmpty],
  },
  CohereModelPref: {
    envKey: "COHERE_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // VoyageAi Options
  VoyageAiApiKey: {
    envKey: "VOYAGEAI_API_KEY",
    checks: [isNotEmpty],
  },
  // Whisper (transcription) providers
  WhisperProvider: {
    envKey: "WHISPER_PROVIDER",
    checks: [isNotEmpty, supportedTranscriptionProvider],
    postUpdate: [],
  },
  WhisperModelPref: {
    envKey: "WHISPER_MODEL_PREF",
    checks: [validLocalWhisper],
    postUpdate: [],
  },
  // System Settings
  AuthToken: {
    envKey: "AUTH_TOKEN",
    checks: [requiresForceMode, noRestrictedChars],
  },
  JWTSecret: {
    envKey: "JWT_SECRET",
    checks: [requiresForceMode],
  },
  DisableTelemetry: {
    envKey: "DISABLE_TELEMETRY",
    checks: [],
  },
  // Agent Integration ENVs
  AgentGoogleSearchEngineId: {
    envKey: "AGENT_GSE_CTX",
    checks: [],
  },
  AgentGoogleSearchEngineKey: {
    envKey: "AGENT_GSE_KEY",
    checks: [],
  },
  AgentSearchApiKey: {
    envKey: "AGENT_SEARCHAPI_API_KEY",
    checks: [],
  },
  AgentSearchApiEngine: {
    envKey: "AGENT_SEARCHAPI_ENGINE",
    checks: [],
  },
  AgentSerperApiKey: {
    envKey: "AGENT_SERPER_DEV_KEY",
    checks: [],
  },
  AgentBingSearchApiKey: {
    envKey: "AGENT_BING_SEARCH_API_KEY",
    checks: [],
  },
  AgentSerplyApiKey: {
    envKey: "AGENT_SERPLY_API_KEY",
    checks: [],
  },
  AgentSearXNGApiUrl: {
    envKey: "AGENT_SEARXNG_API_URL",
    checks: [],
  },
  AgentTavilyApiKey: {
    envKey: "AGENT_TAVILY_API_KEY",
    checks: [],
  },
  // TTS/STT Integration ENVS
  TextToSpeechProvider: {
    envKey: "TTS_PROVIDER",
    checks: [supportedTTSProvider],
  },
  // TTS OpenAI
  TTSOpenAIKey: {
    envKey: "TTS_OPEN_AI_KEY",
    checks: [validOpenAIKey],
  },
  TTSOpenAIVoiceModel: {
    envKey: "TTS_OPEN_AI_VOICE_MODEL",
    checks: [],
  },
  // TTS ElevenLabs
  TTSElevenLabsKey: {
    envKey: "TTS_ELEVEN_LABS_KEY",
    checks: [isNotEmpty],
  },
  TTSElevenLabsVoiceModel: {
    envKey: "TTS_ELEVEN_LABS_VOICE_MODEL",
    checks: [],
  },
  // PiperTTS Local
  TTSPiperTTSVoiceModel: {
    envKey: "TTS_PIPER_VOICE_MODEL",
    checks: [],
  },
  // OpenAI Generic TTS
  TTSOpenAICompatibleKey: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_KEY",
    checks: [],
  },
  TTSOpenAICompatibleVoiceModel: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_VOICE_MODEL",
    checks: [isNotEmpty],
  },
  TTSOpenAICompatibleEndpoint: {
    envKey: "TTS_OPEN_AI_COMPATIBLE_ENDPOINT",
    checks: [isValidURL],
  },
  // DeepSeek Options
  DeepSeekApiKey: {
    envKey: "DEEPSEEK_API_KEY",
    checks: [isNotEmpty],
  },
  DeepSeekModelPref: {
    envKey: "DEEPSEEK_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // APIPie Options
  ApipieLLMApiKey: {
    envKey: "APIPIE_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  ApipieLLMModelPref: {
    envKey: "APIPIE_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // xAI Options
  XAIApiKey: {
    envKey: "XAI_LLM_API_KEY",
    checks: [isNotEmpty],
  },
  XAIModelPref: {
    envKey: "XAI_LLM_MODEL_PREF",
    checks: [isNotEmpty],
  },
  // Nvidia NIM Options
  NvidiaNimLLMBasePath: {
    envKey: "NVIDIA_NIM_LLM_BASE_PATH",
    checks: [isValidURL],
    postUpdate: [
      (_, __, nextValue) => {
        const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
        process.env.NVIDIA_NIM_LLM_BASE_PATH =
          parseNvidiaNimBasePath(nextValue);
      },
    ],
  },
  NvidiaNimLLMModelPref: {
    envKey: "NVIDIA_NIM_LLM_MODEL_PREF",
    checks: [],
    postUpdate: [
      async (_, __, nextValue) => {
        const { NvidiaNimLLM } = require("../AiProviders/nvidiaNim");
        await NvidiaNimLLM.setModelTokenLimit(nextValue);
      },
    ],
  },
};
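
// Validators.
// Each validator below returns null when the value passes and an error string when it
// fails. executeValidationChecks() only collects string results as errors, so any other
// return value is treated as a pass.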
function isNotEmpty(input = "") {
  return !input || input.length === 0 ? "Value cannot be empty" : null;
}

function nonZero(input = "") {
  if (isNaN(Number(input))) return "Value must be a number";
  return Number(input) <= 0 ? "Value must be greater than zero" : null;
}
function isInteger(input = "") {
  // Only verifies the value is numeric; returns null on success so the result is
  // consistent with the other validators (null = pass, string = error).
  if (isNaN(Number(input))) return "Value must be a number";
  return null;
}
function isValidURL(input = "") {
  try {
    new URL(input);
    return null;
  } catch (e) {
    return "Not a valid URL.";
  }
}
function validOpenAIKey(input = "") {
  return input.startsWith("sk-") ? null : "OpenAI Key must start with sk-";
}

function validAnthropicApiKey(input = "") {
  return input.startsWith("sk-ant-")
    ? null
    : "Anthropic Key must start with sk-ant-";
}

function validLLMExternalBasePath(input = "") {
  try {
    new URL(input);
    if (!input.includes("v1")) return "URL must include /v1";
    if (input.split("").slice(-1)?.[0] === "/")
      return "URL cannot end with a slash";
    return null;
  } catch {
    return "Not a valid URL";
  }
}

function validOllamaLLMBasePath(input = "") {
  try {
    new URL(input);
    if (input.split("").slice(-1)?.[0] === "/")
      return "URL cannot end with a slash";
    return null;
  } catch {
    return "Not a valid URL";
  }
}
function supportedTTSProvider(input = "") {
  const validSelection = [
    "native",
    "openai",
    "elevenlabs",
    "piper_local",
    "generic-openai",
  ].includes(input);
  return validSelection ? null : `${input} is not a valid TTS provider.`;
}

function validLocalWhisper(input = "") {
  const validSelection = [
    "Xenova/whisper-small",
    "Xenova/whisper-large",
  ].includes(input);
  return validSelection
    ? null
    : `${input} is not a valid Whisper model selection.`;
}

function supportedLLM(input = "") {
  const validSelection = [
    "openai",
    "azure",
    "anthropic",
    "gemini",
    "lmstudio",
    "localai",
    "ollama",
    "togetherai",
    "fireworksai",
    "mistral",
    "huggingface",
    "perplexity",
    "openrouter",
    "novita",
    "groq",
    "koboldcpp",
    "textgenwebui",
    "cohere",
    "litellm",
    "generic-openai",
    "bedrock",
    "deepseek",
    "apipie",
    "xai",
    "nvidia-nim",
  ].includes(input);
  return validSelection ? null : `${input} is not a valid LLM provider.`;
}

function supportedTranscriptionProvider(input = "") {
  const validSelection = ["openai", "local"].includes(input);
  return validSelection
    ? null
    : `${input} is not a valid transcription model provider.`;
}
function validGeminiSafetySetting(input = "") {
  const validModes = [
    "BLOCK_NONE",
    "BLOCK_ONLY_HIGH",
    "BLOCK_MEDIUM_AND_ABOVE",
    "BLOCK_LOW_AND_ABOVE",
  ];
  return validModes.includes(input)
    ? null
    : `Invalid Safety setting. Must be one of ${validModes.join(", ")}.`;
}

function validAnthropicModel(input = "") {
  const validModels = [
    "claude-instant-1.2",
    "claude-2.0",
    "claude-2.1",
    "claude-3-haiku-20240307",
    "claude-3-sonnet-20240229",
    "claude-3-opus-latest",
    "claude-3-5-haiku-latest",
    "claude-3-5-haiku-20241022",
    "claude-3-5-sonnet-latest",
    "claude-3-5-sonnet-20241022",
    "claude-3-5-sonnet-20240620",
  ];
  return validModels.includes(input)
    ? null
    : `Invalid Model type. Must be one of ${validModels.join(", ")}.`;
}
function supportedEmbeddingModel(input = "") {
  const supported = [
    "openai",
    "azure",
    "gemini",
    "localai",
    "native",
    "ollama",
    "lmstudio",
    "cohere",
    "voyageai",
    "litellm",
    "generic-openai",
    "mistral",
  ];
  return supported.includes(input)
    ? null
    : `Invalid Embedding model type. Must be one of ${supported.join(", ")}.`;
}

function supportedVectorDB(input = "") {
  const supported = [
    "chroma",
    "pinecone",
    "lancedb",
    "weaviate",
    "qdrant",
    "milvus",
    "zilliz",
    "astra",
  ];
  return supported.includes(input)
    ? null
    : `Invalid VectorDB type. Must be one of ${supported.join(", ")}.`;
}
function validChromaURL(input = "") {
  return input.slice(-1) === "/"
    ? `Chroma Instance URL should not end in a trailing slash.`
    : null;
}

function validOpenAiTokenLimit(input = "") {
  const tokenLimit = Number(input);
  if (isNaN(tokenLimit)) return "Token limit is not a number";
  if (![4_096, 16_384, 8_192, 32_768, 128_000].includes(tokenLimit))
    return "Invalid OpenAI token limit.";
  return null;
}

function requiresForceMode(_, forceModeEnabled = false) {
  return forceModeEnabled === true ? null : "Cannot set this setting.";
}
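
// When running inside the Docker container, loopback URLs (localhost/127.0.0.1) point at
// the container itself rather than the host machine. This check flags loopback URLs whose
// port is not reachable from inside the container so users are directed to
// host.docker.internal or a real host address instead.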
async function validDockerizedUrl(input = "") {
  if (process.env.ANYTHING_LLM_RUNTIME !== "docker") return null;
  try {
    const { isPortInUse, getLocalHosts } = require("./portAvailabilityChecker");
    const localInterfaces = getLocalHosts();
    const url = new URL(input);
    const hostname = url.hostname.toLowerCase();
    const port = parseInt(url.port, 10);

    // If not a loopback, skip this check.
    if (!localInterfaces.includes(hostname)) return null;
    if (isNaN(port)) return "Invalid URL: Port is not specified or invalid";

    const isPortAvailableFromDocker = await isPortInUse(port, hostname);
    if (isPortAvailableFromDocker)
      return "Port is not running a reachable service on loopback address from inside the AnythingLLM container. Please use host.docker.internal (for linux use 172.17.0.1), a real machine ip, or domain to connect to your service.";
  } catch (error) {
    console.error(error.message);
    return "An error occurred while validating the URL";
  }
  return null;
}
function validHuggingFaceEndpoint(input = "") {
  return input.slice(-6) !== ".cloud"
    ? `Your HF Endpoint should end in ".cloud"`
    : null;
}

function noRestrictedChars(input = "") {
  const regExp = new RegExp(/^[a-zA-Z0-9_\-!@$%^&*();]+$/);
  return !regExp.test(input)
    ? `Your password has restricted characters in it. Allowed symbols are _,-,!,@,$,%,^,&,*,(,),;`
    : null;
}
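
// postUpdate hook: when the vector database or the embedder configuration changes, the
// embeddings already stored under the old configuration are no longer compatible, so the
// affected vector namespaces are wiped.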
async function handleVectorStoreReset(key, prevValue, nextValue) {
  if (prevValue === nextValue) return;
  if (key === "VectorDB") {
    console.log(
      `Vector configuration changed from ${prevValue} to ${nextValue} - resetting ${prevValue} namespaces`
    );
    return await resetAllVectorStores({ vectorDbKey: prevValue });
  }
  if (key === "EmbeddingEngine" || key === "EmbeddingModelPref") {
    console.log(
      `${key} changed from ${prevValue} to ${nextValue} - resetting ${process.env.VECTOR_DB} namespaces`
    );
    return await resetAllVectorStores({ vectorDbKey: process.env.VECTOR_DB });
  }
  return false;
}
// This will force-update .env variables which, for whatever reason, could not be parsed
// or read from an ENV file. Since that has proven to be a complicating step for many
// users, allowing writes directly to process.env at least alleviates the issue. It does
// not perform comprehensive validity or sanity checks and is mainly for debugging the
// common ".env not found" issue.
async function updateENV(newENVs = {}, force = false, userId = null) {
  let error = "";
  const validKeys = Object.keys(KEY_MAPPING);
  const ENV_KEYS = Object.keys(newENVs).filter(
    (key) => validKeys.includes(key) && !newENVs[key].includes("******") // strip out answers where the value is all asterisks
  );
  const newValues = {};

  for (const key of ENV_KEYS) {
    const { envKey, checks, postUpdate = [] } = KEY_MAPPING[key];
    const prevValue = process.env[envKey];
    const nextValue = newENVs[key];
    const errors = await executeValidationChecks(checks, nextValue, force);
    if (errors.length > 0) {
      error += errors.join("\n");
      break;
    }

    newValues[key] = nextValue;
    process.env[envKey] = nextValue;
    for (const postUpdateFunc of postUpdate)
      await postUpdateFunc(key, prevValue, nextValue);
  }

  await logChangesToEventLog(newValues, userId);
  if (process.env.NODE_ENV === "production") dumpENV();
  return { newValues, error: error?.length > 0 ? error : false };
}
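
// Illustrative usage (not part of this module): a settings endpoint might apply
// user-submitted values roughly like the sketch below. The keys come from KEY_MAPPING;
// the specific values and the `user`/`response` objects are placeholders.
//
//   const { newValues, error } = await updateENV(
//     { LLMProvider: "openai", OpenAiKey: "sk-...", OpenAiModelPref: "gpt-4o" },
//     false, // force mode is only needed for protected keys like AUTH_TOKEN / JWT_SECRET
//     user?.id ?? null
//   );
//   if (error) return response.status(400).json({ error });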
async function executeValidationChecks(checks, value, force) {
  const results = await Promise.all(
    checks.map((validator) => validator(value, force))
  );
  return results.filter((err) => typeof err === "string");
}
async function logChangesToEventLog(newValues = {}, userId = null) {
  const { EventLogs } = require("../../models/eventLogs");
  const eventMapping = {
    LLMProvider: "update_llm_provider",
    EmbeddingEngine: "update_embedding_engine",
    VectorDB: "update_vector_db",
  };

  for (const [key, eventName] of Object.entries(eventMapping)) {
    if (!newValues.hasOwnProperty(key)) continue;
    await EventLogs.logEvent(eventName, {}, userId);
  }
  return;
}
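
// Note: only provider-level switches (LLM provider, embedding engine, vector DB) are
// recorded in the event log; individual key or model preference changes are not.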
function dumpENV() {
  const fs = require("fs");
  const path = require("path");

  const frozenEnvs = {};
  const protectedKeys = [
    ...Object.values(KEY_MAPPING).map((values) => values.envKey),
    // Manually Add Keys here which are not already defined in KEY_MAPPING
    // and are either managed or manually set ENV key:values.
    "STORAGE_DIR",
    "SERVER_PORT",
    // For persistent data encryption
    "SIG_KEY",
    "SIG_SALT",
    // Password Schema Keys if present.
    "PASSWORDMINCHAR",
    "PASSWORDMAXCHAR",
    "PASSWORDLOWERCASE",
    "PASSWORDUPPERCASE",
    "PASSWORDNUMERIC",
    "PASSWORDSYMBOL",
    "PASSWORDREQUIREMENTS",
    // HTTPS SETUP KEYS
    "ENABLE_HTTPS",
    "HTTPS_CERT_PATH",
    "HTTPS_KEY_PATH",
    // Other Configuration Keys
    "DISABLE_VIEW_CHAT_HISTORY",
    // Simple SSO
    "SIMPLE_SSO_ENABLED",
    // Community Hub
    "COMMUNITY_HUB_BUNDLE_DOWNLOADS_ENABLED",
    // Nvidia NIM Keys that are automatically managed
    "NVIDIA_NIM_LLM_MODEL_TOKEN_LIMIT",
  ];

  // Simple sanitization of each value to prevent ENV injection via newline or quote escaping.
  function sanitizeValue(value) {
    const offendingChars =
      /[\n\r\t\v\f\u0085\u00a0\u1680\u180e\u2000-\u200a\u2028\u2029\u202f\u205f\u3000"'`#]/;
    const firstOffendingCharIndex = value.search(offendingChars);
    if (firstOffendingCharIndex === -1) return value;
    return value.substring(0, firstOffendingCharIndex);
  }

  for (const key of protectedKeys) {
    const envValue = process.env?.[key] || null;
    if (!envValue) continue;
    frozenEnvs[key] = process.env?.[key] || null;
  }

  var envResult = `# Auto-dump ENV from system call on ${new Date().toTimeString()}\n`;
  envResult += Object.entries(frozenEnvs)
    .map(([key, value]) => `${key}='${sanitizeValue(value)}'`)
    .join("\n");

  const envPath = path.join(__dirname, "../../.env");
  fs.writeFileSync(envPath, envResult, { encoding: "utf8", flag: "w" });
  return true;
}
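
// The dumped .env is a flat list of KEY='value' lines preceded by a timestamp comment,
// for example (values below are placeholders):
//   # Auto-dump ENV from system call on 12:00:00 GMT+0000 (Coordinated Universal Time)
//   LLM_PROVIDER='openai'
//   OPEN_AI_KEY='sk-xxxx'
//   STORAGE_DIR='/path/to/storage'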
module.exports = {
  dumpENV,
  updateENV,
};