-
Notifications
You must be signed in to change notification settings - Fork 908
Expand file tree
/
Copy path.env.example
More file actions
316 lines (268 loc) · 12.1 KB
/
.env.example
File metadata and controls
316 lines (268 loc) · 12.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
# Docker Compose environment variable configuration example
# Copy this file to .env and modify the configuration as needed
# ============================================================================
# Image Version Configuration
# ============================================================================
# astron-agent image version tag (default: latest)
ASTRON_AGENT_VERSION=latest
# ============================================================================
# Middleware Configuration
# ============================================================================
# PostgreSQL Configuration
POSTGRES_USER=spark
POSTGRES_PASSWORD=spark123
# PostgreSQL connection configuration. If deploying middleware independently, modify the following configuration; otherwise, use default
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
# MySQL Configuration
MYSQL_ROOT_PASSWORD=root123
# MySQL connection configuration. If deploying middleware independently, modify the following configuration; otherwise, use default
MYSQL_USER=root
MYSQL_PASSWORD=${MYSQL_ROOT_PASSWORD:-root123}
MYSQL_HOST=mysql
MYSQL_PORT=3306
MYSQL_URL=jdbc:mysql://mysql:3306/astron_console?useSSL=false&allowPublicKeyRetrieval=true&serverTimezone=Asia/Shanghai&characterEncoding=utf8&createDatabaseIfNotExist=true
# Redis Configuration
REDIS_PASSWORD=123
REDIS_DATABASE=0
REDIS_IS_CLUSTER=false
REDIS_CLUSTER_ADDR=
REDIS_EXPIRE=3600
# Redis connection configuration. If deploying middleware independently, modify the following configuration; otherwise, use default
REDIS_ADDR=redis:6379
REDIS_HOST=redis
REDIS_PORT=6379
# Elasticsearch Configuration
ELASTICSEARCH_SECURITY_ENABLED=false
ES_JAVA_OPTS='-Xms512m -Xmx512m'
# Kafka Configuration
KAFKA_ENABLE=0
KAFKA_REPLICATION_FACTOR=1
KAFKA_CLUSTER_ID=MkU3OEVBNTcwNTJENDM2Qk
KAFKA_TIMEOUT=60
# Kafka connection configuration. If deploying middleware independently, modify the following configuration; otherwise, use default
KAFKA_SERVERS=kafka:29092
# MinIO Configuration
MINIO_ROOT_USER=minioadmin
MINIO_ROOT_PASSWORD=minioadmin123
EXPOSE_MINIO_PORT=18998
EXPOSE_MINIO_CONSOLE_PORT=18999
# OSS Configuration (can be replaced with your own OSS system)
OSS_TYPE=s3
OSS_ENDPOINT=http://minio:9000
OSS_ACCESS_KEY_ID=${MINIO_ROOT_USER:-minioadmin}
OSS_ACCESS_KEY_SECRET=${MINIO_ROOT_PASSWORD:-minioadmin123}
OSS_BUCKET_NAME=workflow
OSS_TTL=157788000
OSS_DOWNLOAD_HOST=http://minio:9000
# OTLP Address
OTLP_ENABLE=0
OTLP_ENDPOINT=127.0.0.1:4317
OTLP_METRIC_TIMEOUT=3000
OTLP_METRIC_EXPORT_INTERVAL_MILLIS=3000
OTLP_METRIC_EXPORT_TIMEOUT_MILLIS=3000
# Tracing Configuration
OTLP_TRACE_TIMEOUT=3000
OTLP_TRACE_MAX_QUEUE_SIZE=2048
OTLP_TRACE_SCHEDULE_DELAY_MILLIS=3000
OTLP_TRACE_MAX_EXPORT_BATCH_SIZE=2048
OTLP_TRACE_EXPORT_TIMEOUT_MILLIS=3000
# Nginx configuration
EXPOSE_NGINX_PORT=80
HOST_BASE_ADDRESS=http://localhost
# ============================================================================
# astron-agent Application Port Configuration
# ============================================================================
# Casdoor Service Port Configuration
CASDOOR_PORT=8000
# Core Service Port Configuration
CORE_TENANT_PORT=5052
CORE_DATABASE_PORT=7990
CORE_RPA_PORT=17198
CORE_LINK_PORT=18888
CORE_AITOOLS_PORT=18668
CORE_AGENT_PORT=17870
CORE_KNOWLEDGE_PORT=20010
CORE_WORKFLOW_PORT=7880
# Console Frontend Casdoor Configuration
# These variables are prioritized over VITE_CASDOOR_* equivalents in frontend builds
# Note: The CONSOLE_DOMAIN variable is used to dynamically set the Casdoor redirectUris
# The entrypoint.sh script in casdoor container will replace redirectUris with ${CONSOLE_DOMAIN}/callback
CONSOLE_CASDOOR_URL=${HOST_BASE_ADDRESS}:${CASDOOR_PORT}
CONSOLE_CASDOOR_ID=astron-agent-client
CONSOLE_CASDOOR_APP=astron-agent-app
CONSOLE_CASDOOR_ORG=built-in
# ============================================================================
# Component-specific Environment Variable Configuration
# ============================================================================
# - Tenant-specific Configuration
# Database type
DATABASE_DB_TYPE=mysql
# Database username
DATABASE_USERNAME=${MYSQL_USER:-root}
# Database password
DATABASE_PASSWORD=${MYSQL_PASSWORD:-root123}
# Database (ip:port)/database name
DATABASE_URL=(mysql:3306)/tenant
# Database maximum connections
DATABASE_MAX_OPEN_CONNS=5
# Database maximum idle connections
DATABASE_MAX_IDLE_CONNS=5
# Log path
LOG_PATH=log.txt
# - DATABASE-specific Configuration
DATABASE_POSTGRES_DATABASE=sparkdb_manager
# Default public cloud address: https://newapi.iflyrpa.com
# The RPA service has been open sourced; for details, please refer to https://github.com/iflytek/astron-rpa
RPA_URL=https://newapi.iflyrpa.com
# - RPA-specific Configuration
XIAOWU_RPA_TASK_CREATE_URL=${RPA_URL}/api/rpa-openapi/workflows/execute-async
XIAOWU_RPA_TASK_QUERY_URL=${RPA_URL}/api/rpa-openapi/executions
# - Link-specific Configuration
LINK_MYSQL_DB=spark-link
# - Agent-specific Configuration
# Service configuration
SERVICE_HOST=0.0.0.0
SERVICE_WORKERS=1
SERVICE_RELOAD=false
SERVICE_WS_PING_INTERVAL=false
SERVICE_WS_PING_TIMEOUT=false
# MySQL Configuration
AGENT_MYSQL_DB=agent
# ELK upload configuration
UPLOAD_NODE_TRACE=true
UPLOAD_METRICS=true
# Kafka
AGENT_KAFKA_TOPIC=spark-agent-builder
# Link Service URLs
GET_LINK_URL=http://core-link:18888/api/v1/tools
VERSIONS_LINK_URL=http://core-link:18888/api/v1/tools/versions
RUN_LINK_URL=http://core-link:18888/api/v1/tools/http_run
# Workflow Service URLs
GET_WORKFLOWS_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/sparkflow/v1/protocol/get
WORKFLOW_SSE_BASE_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1
# Knowledge Service URLs
CHUNK_QUERY_URL=http://core-knowledge:${CORE_KNOWLEDGE_PORT:-20010}/knowledge/v1/chunk/query
# MCP Plugin URLs
LIST_MCP_PLUGIN_URL=http://core-link:18888/api/v1/mcp/tool_list
RUN_MCP_PLUGIN_URL=http://core-link:18888/api/v1/mcp/call_tool
# Application authentication configuration
APP_AUTH_HOST=core-tenant:${CORE_TENANT_PORT:-5052}
APP_AUTH_PROT=http
APP_AUTH_API_KEY=7b709739e8da44536127a333c7603a83
APP_AUTH_SECRET=NjhmY2NmM2NkZDE4MDFlNmM5ZjcyZjMy
# - Knowledge-specific Configuration
RAGFLOW_BASE_URL=http://your-ragflow-url/
RAGFLOW_API_TOKEN=your-ragflow-token
RAGFLOW_TIMEOUT=60
RAGFLOW_DEFAULT_GROUP=your-default-group
XINGHUO_DATASET_ID=
# - Workflow-specific Configuration
WORKFLOW_MYSQL_DB=workflow
WORKFLOW_KAFKA_TOPIC=spark-agent-builder
RUNTIME_ENV=dev
# ============================================================================
# Console Module Configuration
# ============================================================================
# Domain config for console module
CONSOLE_DOMAIN=${HOST_BASE_ADDRESS}:${EXPOSE_NGINX_PORT}
# S3/MinIO Configuration for Console backend
OSS_REMOTE_ENDPOINT=${HOST_BASE_ADDRESS}:${EXPOSE_MINIO_PORT}
OSS_BUCKET_CONSOLE=console-oss
OSS_PRESIGN_EXPIRY_SECONDS_CONSOLE=600
# Redis Configuration for Console backend
REDIS_DATABASE_CONSOLE=1
# OAuth2 Configuration for Console Backend Api Server (as OAuth2 Resource Server)
OAUTH2_ISSUER_URI=${CONSOLE_CASDOOR_URL:-http://auth-server:8000}
OAUTH2_JWK_SET_URI=http://casdoor:8000/.well-known/jwks
OAUTH2_AUDIENCE=${CONSOLE_CASDOOR_ID:-your-oauth2-client-id}
# Open Platform API Configuration
# You can create an app after registering an account in the console website to obtain the following parameters. See: https://console.xfyun.cn/
# NOTE! The following three values are required for the service to run properly:
PLATFORM_APP_ID=your-app-id
PLATFORM_API_KEY=your-api-key
PLATFORM_API_SECRET=your-api-secret
# AI Ability Chat Configuration (OpenAI-compatible API)
AI_ABILITY_CHAT_BASE_URL=https://spark-api-open.xf-yun.com/v1
AI_ABILITY_CHAT_MODEL=your-model-id
AI_ABILITY_CHAT_API_KEY=your-api-key
# You can get your own API key and secret on the iFLYTEK Open Platform official website, and purchase API usage or get free quota.
# SPARK LLM API: https://xinghuo.xfyun.cn/sparkapi
# RTASR API: https://www.xfyun.cn/services/rtasr
# For RTASR API, you need to apply for a separate API key from the console website (https://console.xfyun.cn/services/rta):
SPARK_RTASR_API_KEY=your-rtasr-api-key
# IMAGE-GEN API: https://www.xfyun.cn/services/wtop
# For Spark LLM API, there will be an additional API password that needs to be obtained from the console website (https://console.xfyun.cn/services/bm4):
SPARK_API_PASSWORD=your-api-password
# For virtual-man API, you need to apply for a separate API key from the console website (https://virtual-man.xfyun.cn/console/applications)
SPARK_VIRTUAL_MAN_APP_ID=your-virtual-man-app-id
SPARK_VIRTUAL_MAN_API_KEY=your-virtual-man-api-key
SPARK_VIRTUAL_MAN_API_SECRET=your-virtual-man-api-secret
# The AppID requires authorization for the Code DeepSeekV3 (domain: xdeepseekv3) and Spark 3.5 Max (domain: generalv3.5) models to support AI-powered prompt optimization and code generation within the canvas.
SPARK_APP_ID=${PLATFORM_APP_ID}
SPARK_API_KEY=${PLATFORM_API_KEY}
SPARK_API_SECRET=${PLATFORM_API_SECRET}
SPARK_RTASR_APPID=${PLATFORM_APP_ID}
SPARK_RTASR_KEY=${SPARK_RTASR_API_KEY}
SPARK_IMAGE_APP_ID=${PLATFORM_APP_ID}
SPARK_IMAGE_API_KEY=${PLATFORM_API_KEY}
SPARK_IMAGE_API_SECRET=${PLATFORM_API_SECRET}
# Console Hub WeChat platform Configuration
WECHAT_COMPONENT_APPID=your-wechat-component-appid
WECHAT_COMPONENT_SECRET=your-wechat
WECHAT_TOKEN=your-wechat-token
WECHAT_ENCODING_AES_KEY=your-wechat-encoding-aes-key
# core-workflow module's service-to-service API calls
WORKFLOW_CHAT_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1/chat/completions
WORKFLOW_DEBUG_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1/debug/chat/completions
WORKFLOW_RESUME_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1/resume
# Toolkit tenant system credentials, required when calling the flow system.
# The value here is pre-written to the database when the core-tenant component is started, see: core/tenant/sql/tenant.sql
TENANT_ID=680ab54f
TENANT_KEY=7b709739e8da44536127a333c7603a83
TENANT_SECRET=NjhmY2NmM2NkZDE4MDFlNmM5ZjcyZjMy
# Common appId, requiring Spark Model authorization
COMMON_APPID=${TENANT_ID}
COMMON_APIKEY=${TENANT_KEY}
COMMON_API_SECRET=${TENANT_SECRET}
# Toolkit admin UID. Plugins created by this user ID default to official plugins.
ADMIN_UID=9999
APP_URL=http://core-tenant:${CORE_TENANT_PORT:-5052}/v2/app
KNOWLEDGE_URL=http://core-knowledge:${CORE_KNOWLEDGE_PORT:-20010}/knowledge
TOOL_URL=http://core-link:18888
TOOL_RPA_URL=http://core-rpa:17198
WORKFLOW_URL=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}
SPARK_DB_URL=http://core-database:${CORE_DATABASE_PORT:-7990}
# Local model service address. The model service is open source;
# for details, please refer to https://github.com/iflytek/astron-xmod-shim/blob/main/README.md
LOCAL_MODEL_URL=http://127.0.0.1:33778
# MaaS Platform Configuration
MAAS_APP_ID=${TENANT_ID}
MAAS_API_KEY=${TENANT_KEY}
MAAS_API_SECRET=${TENANT_SECRET}
MAAS_CONSUMER_ID=${TENANT_ID}
MAAS_CONSUMER_KEY=${TENANT_KEY}
MAAS_CONSUMER_SECRET=${TENANT_SECRET}
MAAS_WORKFLOW_VERSION=http://127.0.0.1:8080/workflow/version
MAAS_SYNCHRONIZE_WORK_FLOW=http://127.0.0.1:8080/workflow
MAAS_PUBLISH=http://127.0.0.1:8080/workflow/publish
MAAS_CLONE_WORK_FLOW=http://127.0.0.1:8080/workflow/internal-clone
MAAS_GET_INPUTS=http://127.0.0.1:8080/workflow/get-inputs-info
MAAS_CAN_PUBLISH_URL=http://127.0.0.1:8080/workflow/can-publish
MAAS_PUBLISH_API=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1/publish
MAAS_AUTH_API=http://core-workflow:${CORE_WORKFLOW_PORT:-7880}/workflow/v1/auth
MAAS_MCP_REGISTER=http://127.0.0.1:8080/workflow/release
MAAS_WORKFLOW_CONFIG=http://127.0.0.1:8080/workflow/get-flow-advanced-config
BOT_API_CBM_BASE_URL=ws(s)://spark-api-open.xf-yun.com
BOT_API_MAAS_BASE_URL=${CONSOLE_DOMAIN}
TENANT_CREATE_APP=http://core-tenant:${CORE_TENANT_PORT:-5052}/v2/app
TENANT_GET_APP_DETAIL=http://core-tenant:${CORE_TENANT_PORT:-5052}/v2/app/details
# ============================================================================
# Other Configuration
# ============================================================================
# Service availability zone (dx, hf, gz)
SERVICE_LOCATION=hf
# Health check configuration
HEALTH_CHECK_INTERVAL=30s
HEALTH_CHECK_TIMEOUT=10s
HEALTH_CHECK_RETRIES=60