forked from ScottLogic/InferESG
-
Notifications
You must be signed in to change notification settings - Fork 5
Expand file tree
/
Copy path.env.example
More file actions
82 lines (66 loc) · 2.39 KB
/
.env.example
File metadata and controls
82 lines (66 loc) · 2.39 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
# neo4j authentication
NEO4J_USERNAME=neo4j
NEO4J_PASSWORD=change-me!
# neo4j graph database URI used by the backend to connect to neo4j
# use "bolt://localhost" when the backend and neo4j are both running locally outside Docker
# use "bolt://host.docker.internal" when the backend is running within Docker but neo4j is running locally (outside Docker)
# URI will be set to the neo4j container's host if using Docker Compose
NEO4J_URI=bolt://localhost:7687
# port configuration is optional
# used with Docker Compose to expose neo4j on non-default ports
NEO4J_HTTP_PORT=7474
NEO4J_BOLT_PORT=7687
# location of the files directory
FILES_DIRECTORY=files
# redis cache configuration
REDIS_HOST="localhost"
# Mistral LLM properties
MISTRAL_KEY=my-api-key
# OpenAI LLM properties
OPENAI_KEY=my-openai-api-key
# frontend host - used to configure backend CORS
FRONTEND_URL=http://localhost:8650
# LM Studio properties - for local running LLM models
# Use host.docker.internal instead of 127.0.0.1 when running in Docker
# The base URL without the /v1 path (this will be added in the code)
LMSTUDIO_URL=http://host.docker.internal:1234
LMSTUDIO_MODEL=liquid/lfm2-1.2b
LMSTUDIO_MAX_TOKENS=1024
# backend URL used by frontend API requests
BACKEND_URL=http://localhost:8250
# WebSocket URL used to connect to the backend WebSocket endpoint
WS_URL=ws://localhost:8250/ws
# LLM provider used by each agent (e.g. "mistral" or "openai")
ANSWER_AGENT_LLM="mistral"
INTENT_AGENT_LLM="openai"
REPORT_AGENT_LLM="openai"
MATERIALITY_AGENT_LLM="openai"
VALIDATOR_AGENT_LLM="openai"
DATASTORE_AGENT_LLM="openai"
WEB_AGENT_LLM="openai"
CHART_GENERATOR_LLM="openai"
ROUTER_LLM="openai"
FILE_AGENT_LLM="openai"
SUGGESTIONS_LLM="openai"
DYNAMIC_KNOWLEDGE_GRAPH_LLM="openai"
GENERALIST_AGENT_LLM="openai"
# model name used by each agent
ANSWER_AGENT_MODEL="mistral-large-latest"
INTENT_AGENT_MODEL="gpt-4o"
REPORT_AGENT_MODEL="gpt-4o"
MATERIALITY_AGENT_MODEL="gpt-4o"
VALIDATOR_AGENT_MODEL="gpt-4o-mini"
DATASTORE_AGENT_MODEL="gpt-4o"
WEB_AGENT_MODEL="gpt-4o"
CHART_GENERATOR_MODEL="gpt-4o-mini"
ROUTER_MODEL="gpt-4o"
FILE_AGENT_MODEL="gpt-4o"
SUGGESTIONS_MODEL="gpt-4o-mini"
DYNAMIC_KNOWLEDGE_GRAPH_MODEL="gpt-4o"
GENERALIST_AGENT_MODEL="gpt-4o"
# Set this to restrict which chat agents are available for solving questions.
# ALLOWED_CHAT_AGENTS="DatastoreAgent,WebAgent,MaterialityAgent,FileAgent"
# Customise location of LLM usage log file
# LLM_USAGE_LOG_FILENAME="test.csv"
# OpenAI configuration for RAGAS evaluations
RAGAS_OPENAI_MODEL=gpt-4o