-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathtrain_all.py
More file actions
323 lines (266 loc) · 10.9 KB
/
train_all.py
File metadata and controls
323 lines (266 loc) · 10.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
"""
Master Training Script - Train All Algorithm Recommender Models
=================================================================
This script trains all five algorithm recommendation models in one command:
1. Sorting Algorithm Selector
2. Array Search Algorithm Selector
3. Graph Search Algorithm Selector
4. String Search Algorithm Selector
5. Tree Search Algorithm Selector
Each model is trained independently with its own synthetic dataset,
and all models + encoders are saved to the /models directory.
Usage:
python train_all.py [--sorting_instances 1000] [--array_instances 500]
[--graph_instances 400] [--seed 42]
Output Files:
models/sorting_model.pkl
models/sorting_algo_encoder.pkl
models/array_search_model.pkl
models/array_search_algo_encoder.pkl
models/graph_search_model.pkl
models/graph_search_algo_encoder.pkl
models/string_search_model.pkl
models/string_search_algo_encoder.pkl
models/tree_search_model.pkl
models/tree_search_algo_encoder.pkl
Author: Algorithm Recommender Research Team
Version: 2.0.0
"""
import os
import sys
import argparse
import time
from typing import Dict
# Make the sibling training packages (sorting/, searching/) importable when
# this script is executed directly, regardless of the current working dir.
PACKAGE_ROOT = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, PACKAGE_ROOT)
def _stage_banner(title: str) -> None:
    """Print a boxed section banner announcing one training stage."""
    print("\n" + "=" * 70)
    print(title)
    print("=" * 70)


def train_all_models(
    sorting_instances: int = 1000,
    array_instances: int = 500,
    graph_instances: int = 400,
    seed: int = 42,
    output_dir: str = None,
    verbose: bool = True,
    string_instances: int = 300,
    tree_instances: int = 300,
) -> Dict[str, Dict]:
    """
    Train all five algorithm recommendation models and save them to disk.

    Args:
        sorting_instances: Synthetic training instances for the sorting model.
        array_instances: Synthetic training instances for the array search model.
        graph_instances: Synthetic training instances for the graph search model.
        seed: Random seed passed to every trainer for reproducibility.
        output_dir: Directory for saved models; defaults to <package>/models.
        verbose: Print progress banners and a final summary.
        string_instances: Synthetic training instances for the string search
            model (previously hard-coded to 300; default preserves behavior).
        tree_instances: Synthetic training instances for the tree search
            model (previously hard-coded to 300; default preserves behavior).

    Returns:
        Dict keyed by model name ('sorting', 'array_search', 'graph_search',
        'string_search', 'tree_search'). The first three carry the trainer's
        own metrics plus a 'total_time' (seconds); the last two carry only
        'total_time', since their trainers return a recommender, not metrics.
    """
    if output_dir is None:
        output_dir = os.path.join(PACKAGE_ROOT, 'models')
    os.makedirs(output_dir, exist_ok=True)

    all_metrics = {}
    total_start = time.time()

    if verbose:
        print("=" * 70)
        print("ALGORITHM RECOMMENDER SYSTEM - MASTER TRAINING PIPELINE")
        print("=" * 70)
        print(f"\nConfiguration:")
        print(f" Sorting instances: {sorting_instances}")
        print(f" Array search instances: {array_instances}")
        print(f" Graph search instances: {graph_instances}")
        print(f" Random seed: {seed}")
        print(f" Output directory: {output_dir}")
        print()

    # =========================================================================
    # TRAIN SORTING MODEL (1/5)
    # =========================================================================
    if verbose:
        # BUGFIX: banner previously said "1/3" although five models are trained.
        _stage_banner("TRAINING MODEL 1/5: SORTING ALGORITHM SELECTOR")
    # Imported lazily so a broken sub-package only fails its own stage setup.
    from sorting.train import train_sorting_model
    sorting_start = time.time()
    sorting_metrics = train_sorting_model(
        num_instances=sorting_instances,
        size_range=(10, 3000),
        seed=seed,
        output_dir=output_dir,
        verbose=verbose
    )
    sorting_time = time.time() - sorting_start
    all_metrics['sorting'] = sorting_metrics
    all_metrics['sorting']['total_time'] = sorting_time
    if verbose:
        print(f"\nSorting model trained in {sorting_time:.1f} seconds")

    # =========================================================================
    # TRAIN ARRAY SEARCH MODEL (2/5)
    # =========================================================================
    if verbose:
        # BUGFIX: banner previously said "2/3".
        _stage_banner("TRAINING MODEL 2/5: ARRAY SEARCH ALGORITHM SELECTOR")
    from searching.array_search.train import train_array_search_model
    array_start = time.time()
    array_metrics = train_array_search_model(
        num_instances=array_instances,
        size_range=(10, 3000),
        seed=seed,
        output_dir=output_dir,
        verbose=verbose
    )
    array_time = time.time() - array_start
    all_metrics['array_search'] = array_metrics
    all_metrics['array_search']['total_time'] = array_time
    if verbose:
        print(f"\nArray search model trained in {array_time:.1f} seconds")

    # =========================================================================
    # TRAIN GRAPH SEARCH MODEL (3/5)
    # =========================================================================
    if verbose:
        # BUGFIX: banner previously said "3/3".
        _stage_banner("TRAINING MODEL 3/5: GRAPH SEARCH ALGORITHM SELECTOR")
    from searching.graph_search.train import train_graph_search_model
    graph_start = time.time()
    graph_metrics = train_graph_search_model(
        num_instances=graph_instances,
        size_range=(10, 100),  # graphs are far smaller than arrays
        seed=seed,
        output_dir=output_dir,
        verbose=verbose
    )
    graph_time = time.time() - graph_start
    all_metrics['graph_search'] = graph_metrics
    all_metrics['graph_search']['total_time'] = graph_time
    if verbose:
        print(f"\nGraph search model trained in {graph_time:.1f} seconds")

    # =========================================================================
    # TRAIN STRING SEARCH MODEL (4/5)
    # =========================================================================
    if verbose:
        _stage_banner("TRAINING MODEL 4/5: STRING SEARCH ALGORITHM SELECTOR")
    from searching.string_search.train import train_string_search_model
    string_start = time.time()
    string_search_path = os.path.join(output_dir, 'string_search_model.pkl')
    # Return value (a recommender object) is not needed here; only timing
    # is recorded because this trainer reports no metric dict.
    train_string_search_model(
        num_instances=string_instances,
        output_path=string_search_path,
        seed=seed,
        verbose=verbose
    )
    string_time = time.time() - string_start
    all_metrics['string_search'] = {'total_time': string_time}
    if verbose:
        print(f"\nString search model trained in {string_time:.1f} seconds")

    # =========================================================================
    # TRAIN TREE SEARCH MODEL (5/5)
    # =========================================================================
    if verbose:
        _stage_banner("TRAINING MODEL 5/5: TREE SEARCH ALGORITHM SELECTOR")
    from searching.tree_search.train import train_tree_search_model
    tree_start = time.time()
    tree_search_path = os.path.join(output_dir, 'tree_search_model.pkl')
    train_tree_search_model(
        num_instances=tree_instances,
        output_path=tree_search_path,
        seed=seed,
        verbose=verbose
    )
    tree_time = time.time() - tree_start
    all_metrics['tree_search'] = {'total_time': tree_time}
    if verbose:
        print(f"\nTree search model trained in {tree_time:.1f} seconds")

    # =========================================================================
    # FINAL SUMMARY
    # =========================================================================
    total_time = time.time() - total_start
    if verbose:
        _stage_banner("ALL TRAINING COMPLETE")
        print("\n📊 TRAINING SUMMARY")
        print("-" * 50)
        print("\nSorting Algorithm Selector:")
        print(f" CV Accuracy: {all_metrics['sorting']['cv_mean']:.4f} ± {all_metrics['sorting']['cv_std']:.4f}")
        print(f" Test Accuracy: {all_metrics['sorting']['test_accuracy']:.4f}")
        print(f" Training Time: {sorting_time:.1f}s")
        print("\nArray Search Algorithm Selector:")
        print(f" CV Accuracy: {all_metrics['array_search']['cv_mean']:.4f} ± {all_metrics['array_search']['cv_std']:.4f}")
        print(f" Test Accuracy: {all_metrics['array_search']['test_accuracy']:.4f}")
        print(f" Training Time: {array_time:.1f}s")
        print("\nGraph Search Algorithm Selector:")
        print(f" CV Accuracy: {all_metrics['graph_search']['cv_mean']:.4f} ± {all_metrics['graph_search']['cv_std']:.4f}")
        print(f" Test Accuracy: {all_metrics['graph_search']['test_accuracy']:.4f}")
        print(f" Training Time: {graph_time:.1f}s")
        print("\nString Search Algorithm Selector:")
        print(f" Training Time: {string_time:.1f}s")
        print("\nTree Search Algorithm Selector:")
        print(f" Training Time: {tree_time:.1f}s")
        print("\n📁 SAVED MODEL FILES")
        print("-" * 50)
        # NOTE(review): the string/tree trainers receive only a model
        # output_path; confirm they also write the *_algo_encoder.pkl files
        # listed here, otherwise these two lines overstate what was saved.
        print(f" {os.path.join(output_dir, 'sorting_model.pkl')}")
        print(f" {os.path.join(output_dir, 'sorting_algo_encoder.pkl')}")
        print(f" {os.path.join(output_dir, 'array_search_model.pkl')}")
        print(f" {os.path.join(output_dir, 'array_search_algo_encoder.pkl')}")
        print(f" {os.path.join(output_dir, 'graph_search_model.pkl')}")
        print(f" {os.path.join(output_dir, 'graph_search_algo_encoder.pkl')}")
        print(f" {os.path.join(output_dir, 'string_search_model.pkl')}")
        print(f" {os.path.join(output_dir, 'string_search_algo_encoder.pkl')}")
        print(f" {os.path.join(output_dir, 'tree_search_model.pkl')}")
        print(f" {os.path.join(output_dir, 'tree_search_algo_encoder.pkl')}")
        print(f"\n⏱️ Total training time: {total_time:.1f} seconds ({total_time/60:.1f} minutes)")
        print("\n✅ All models trained and saved successfully!")

    return all_metrics
def main():
    """Parse CLI flags and launch the full training pipeline."""
    parser = argparse.ArgumentParser(
        description="Train all algorithm recommendation models",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Integer-valued options, registered table-style: (flag, default, help).
    for flag, default, help_text in (
        ('--sorting_instances', 1000, 'Training instances for sorting model'),
        ('--array_instances', 500, 'Training instances for array search model'),
        ('--graph_instances', 400, 'Training instances for graph search model'),
        ('--seed', 42, 'Random seed for reproducibility'),
    ):
        parser.add_argument(flag, type=int, default=default, help=help_text)
    parser.add_argument(
        '--output', type=str, default=None,
        help='Output directory for models',
    )
    parser.add_argument(
        '--quiet', action='store_true',
        help='Suppress verbose output',
    )
    parser.add_argument(
        '--fast', action='store_true',
        help='Fast mode with fewer instances (for testing)',
    )
    args = parser.parse_args()

    # --fast shrinks every dataset so a smoke-test run finishes quickly.
    if args.fast:
        args.sorting_instances = 200
        args.array_instances = 100
        args.graph_instances = 100

    train_all_models(
        sorting_instances=args.sorting_instances,
        array_instances=args.array_instances,
        graph_instances=args.graph_instances,
        seed=args.seed,
        output_dir=args.output,
        verbose=not args.quiet,
    )


if __name__ == '__main__':
    main()