Diffstat (limited to 'demo_optimizer.py')
| -rw-r--r-- | demo_optimizer.py | 165 |
1 file changed, 165 insertions, 0 deletions
diff --git a/demo_optimizer.py b/demo_optimizer.py
new file mode 100644
index 0000000..48ea776
--- /dev/null
+++ b/demo_optimizer.py
@@ -0,0 +1,165 @@
+import json
+from artifact_optimizer import ArtifactOptimizer
+
+def run_demo():
+    """Demonstrate the artifact optimizer with different priority scenarios"""
+
+    # Initialize optimizer
+    optimizer = ArtifactOptimizer()
+    optimizer.load_data('data.json', 'characters.json')
+
+    print("="*80)
+    print("🎮 GENSHIN ARTIFACT OPTIMIZER DEMO")
+    print("="*80)
+    print(f"📦 Total artifacts available: {len(optimizer.artifacts)}")
+    print(f"👥 Characters available: {', '.join(optimizer.character_data.keys())}")
+
+    # Define different priority scenarios
+    scenarios = [
+        {
+            "name": "DPS Priority",
+            "description": "Prioritize main DPS characters first",
+            "priority": ["Fischl", "Chiori", "Furina", "Escoffier"]
+        },
+        {
+            "name": "Support Priority",
+            "description": "Prioritize support characters first",
+            "priority": ["Furina", "Escoffier", "Fischl", "Chiori"]
+        },
+        {
+            "name": "ER Demanding First",
+            "description": "Prioritize characters with high ER requirements",
+            "priority": ["Escoffier", "Furina", "Fischl", "Chiori"]
+        },
+        {
+            "name": "Reverse Order",
+            "description": "Completely reverse priority order",
+            "priority": ["Chiori", "Fischl", "Escoffier", "Furina"]
+        }
+    ]
+
+    # Store results for comparison
+    all_results = {}
+
+    # Run each scenario
+    for i, scenario in enumerate(scenarios, 1):
+        print(f"\n{'='*60}")
+        print(f"SCENARIO {i}: {scenario['name']}")
+        print(f"{'='*60}")
+        print(f"{scenario['description']}")
+        print(f"🎯 Priority: {' -> '.join(scenario['priority'])}")
+
+        # Run optimization
+        results = optimizer.optimize_builds(scenario['priority'])
+        all_results[scenario['name']] = results
+
+        # Quick summary
+        successful = sum(1 for data in results.values() if data['build'] is not None)
+        if successful > 0:
+            avg_score = sum(data['average_score'] for data in results.values() if data['build']) / successful
+            er_compliant = sum(1 for data in results.values()
+                               if data['build'] and data['er_info']['meets_requirement'])
+
+            print(f"✅ Successful builds: {successful}/4")
+            print(f"Average quality: {avg_score:.1f}/100")
+            print(f"⚡ ER compliant: {er_compliant}/4")
+
+            # Show individual scores
+            print("\nIndividual Scores:")
+            for char, data in results.items():
+                if data['build']:
+                    er_status = "✅" if data['er_info']['meets_requirement'] else "⚠️"
+                    print(f"   {char}: {data['average_score']:.1f}/100 {er_status}")
+                else:
+                    print(f"   {char}: No build found ❌")
+        else:
+            print("❌ No successful builds found!")
+
+    # Comparison analysis
+    print(f"\n{'='*80}")
+    print("SCENARIO COMPARISON")
+    print(f"{'='*80}")
+
+    comparison_data = []
+    for scenario_name, results in all_results.items():
+        successful = sum(1 for data in results.values() if data['build'] is not None)
+        if successful > 0:
+            avg_score = sum(data['average_score'] for data in results.values() if data['build']) / successful
+            er_compliant = sum(1 for data in results.values()
+                               if data['build'] and data['er_info']['meets_requirement'])
+
+            comparison_data.append({
+                'scenario': scenario_name,
+                'successful_builds': successful,
+                'average_score': avg_score,
+                'er_compliant': er_compliant,
+                'total_score': successful * avg_score  # Weighted score
+            })
+
+    # Sort by total weighted score
+    comparison_data.sort(key=lambda x: x['total_score'], reverse=True)
+
+    print("Ranking (by total weighted score):")
+    for i, data in enumerate(comparison_data, 1):
+        print(f"   {i}. {data['scenario']}")
+        print(f"      Builds: {data['successful_builds']}/4 | "
+              f"Avg Score: {data['average_score']:.1f} | "
+              f"ER Compliant: {data['er_compliant']}/4 | "
+              f"Total: {data['total_score']:.1f}")
+
+    # Character-specific analysis
+    print(f"\nCHARACTER PERFORMANCE ACROSS SCENARIOS")
+    print(f"{'='*60}")
+
+    characters = list(optimizer.character_data.keys())
+    for char in characters:
+        print(f"\n{char}:")
+        char_scores = []
+        for scenario_name, results in all_results.items():
+            if results[char]['build']:
+                score = results[char]['average_score']
+                er_ok = results[char]['er_info']['meets_requirement']
+                char_scores.append(score)
+                status = "✅" if er_ok else "⚠️"
+                print(f"   {scenario_name}: {score:.1f}/100 {status}")
+            else:
+                print(f"   {scenario_name}: No build ❌")
+
+        if char_scores:
+            print(f"   Score range: {min(char_scores):.1f} - {max(char_scores):.1f}")
+            print(f"   Average: {sum(char_scores)/len(char_scores):.1f}")
+
+    # Priority impact analysis
+    print(f"\n🎯 PRIORITY IMPACT ANALYSIS")
+    print(f"{'='*60}")
+
+    print("Key Findings:")
+
+    # Find which characters benefit most from high priority
+    char_variance = {}
+    for char in characters:
+        scores = []
+        for results in all_results.values():
+            if results[char]['build']:
+                scores.append(results[char]['average_score'])
+
+        if len(scores) > 1:
+            variance = max(scores) - min(scores)
+            char_variance[char] = variance
+
+    if char_variance:
+        most_affected = max(char_variance.items(), key=lambda x: x[1])
+        least_affected = min(char_variance.items(), key=lambda x: x[1])
+
+        print(f"• Most affected by priority: {most_affected[0]} (variance: {most_affected[1]:.1f})")
+        print(f"• Least affected by priority: {least_affected[0]} (variance: {least_affected[1]:.1f})")
+
+    # Save detailed results
+    with open('demo_results.json', 'w') as f:
+        json.dump(all_results, f, indent=2, default=str)
+
+    print(f"\n💾 Detailed results saved to 'demo_results.json'")
+    print(f"\nDemo completed! Try different scenarios to see how priority affects builds.")
+
+if __name__ == "__main__":
+    run_demo()
\ No newline at end of file