---
# This file contains all configurable parameters for the HGM system.

# Language Model Configuration
llm:
  # Model used for self-improvement tasks
  self_improve_llm: "gpt-5-mini"
  # Model used for downstream evaluation tasks
  downstream_llm: "gpt-5-mini"
  # Model used for problem diagnosis
  diagnose_llm: "gpt-5-mini"

# Optimization Algorithm Parameters
optimization:
  # Alpha parameter for node expansion (controls exploration vs exploitation)
  alpha: 0.6
  # Beta parameter for cooling down factor
  beta: 1.0
  # Whether to use decreasing temperature over iterations
  cool_down: false
  # Randomness level for evaluation task selection
  # (0.0 = deterministic, 1.0 = fully random)
  eval_random_level: 1.0
  # Number of pseudo descendant evaluations for tree search
  n_pseudo_descendant_evals: 10000

# Execution and Resource Management
execution:
  # Number of parallel workers for self-improvement attempts
  max_workers: 20
  # Timeout for self-improvement attempts (in seconds)
  self_improve_timeout: 3600  # 1 hour
  # Timeout for evaluation attempts (in seconds)
  evaluation_timeout: 3600  # 1 hour
  # Maximum number of task evaluations (evolution iterations)
  max_task_evals: 800

# Evaluation Settings
evaluation:
  # Skip full evaluation on SWE-bench if node is top N performing
  full_eval: false
  # Use Polyglot benchmark instead of SWE-bench
  polyglot: false

# Path Configuration
paths:
  # Output directory for results (if null, will be auto-generated)
  # NOTE(review): trailing ":" in this directory name looks like a typo
  # ("output_hgm" intended?) — confirm with the consumer before changing.
  output_dir: "output_hgm:"
  # Directory to continue a previous run from
  continue_from: null
  # Name of the initial agent (required)
  initial_agent_name: "default_agent"