aboutsummaryrefslogtreecommitdiff
path: root/demo_optimizer.py
diff options
context:
space:
mode:
authorFuwn <[email protected]>2025-05-28 07:12:43 -0700
committerFuwn <[email protected]>2025-05-28 07:12:43 -0700
commit035008f3ee1bf5ee84a0cd290d37f222a1cd5e61 (patch)
tree950ffad958908892a40ed9cb69c0db6a23e59a3a /demo_optimizer.py
downloadgenshin-artifact-playground-main.tar.xz
genshin-artifact-playground-main.zip
feat: Initial commitHEADmain
Diffstat (limited to 'demo_optimizer.py')
-rw-r--r--demo_optimizer.py165
1 file changed, 165 insertions, 0 deletions
diff --git a/demo_optimizer.py b/demo_optimizer.py
new file mode 100644
index 0000000..48ea776
--- /dev/null
+++ b/demo_optimizer.py
@@ -0,0 +1,165 @@
+import json
+from artifact_optimizer import ArtifactOptimizer
+
+def run_demo():
+ """Demonstrate the artifact optimizer with different priority scenarios"""
+
+ # Initialize optimizer
+ optimizer = ArtifactOptimizer()
+ optimizer.load_data('data.json', 'characters.json')
+
+ print("="*80)
+ print("šŸŽ® GENSHIN ARTIFACT OPTIMIZER DEMO")
+ print("="*80)
+ print(f"šŸ“¦ Total artifacts available: {len(optimizer.artifacts)}")
+ print(f"šŸ‘„ Characters available: {', '.join(optimizer.character_data.keys())}")
+
+ # Define different priority scenarios
+ scenarios = [
+ {
+ "name": "DPS Priority",
+ "description": "Prioritize main DPS characters first",
+ "priority": ["Fischl", "Chiori", "Furina", "Escoffier"]
+ },
+ {
+ "name": "Support Priority",
+ "description": "Prioritize support characters first",
+ "priority": ["Furina", "Escoffier", "Fischl", "Chiori"]
+ },
+ {
+ "name": "ER Demanding First",
+ "description": "Prioritize characters with high ER requirements",
+ "priority": ["Escoffier", "Furina", "Fischl", "Chiori"]
+ },
+ {
+ "name": "Reverse Order",
+ "description": "Completely reverse priority order",
+ "priority": ["Chiori", "Fischl", "Escoffier", "Furina"]
+ }
+ ]
+
+ # Store results for comparison
+ all_results = {}
+
+ # Run each scenario
+ for i, scenario in enumerate(scenarios, 1):
+ print(f"\n{'='*60}")
+ print(f"SCENARIO {i}: {scenario['name']}")
+ print(f"{'='*60}")
+ print(f"šŸ“ {scenario['description']}")
+ print(f"šŸŽÆ Priority: {' -> '.join(scenario['priority'])}")
+
+ # Run optimization
+ results = optimizer.optimize_builds(scenario['priority'])
+ all_results[scenario['name']] = results
+
+ # Quick summary
+ successful = sum(1 for data in results.values() if data['build'] is not None)
+ if successful > 0:
+ avg_score = sum(data['average_score'] for data in results.values() if data['build']) / successful
+ er_compliant = sum(1 for data in results.values()
+ if data['build'] and data['er_info']['meets_requirement'])
+
+ print(f"āœ… Successful builds: {successful}/4")
+ print(f"šŸ“Š Average quality: {avg_score:.1f}/100")
+ print(f"⚔ ER compliant: {er_compliant}/4")
+
+ # Show individual scores
+ print("\nšŸ“‹ Individual Scores:")
+ for char, data in results.items():
+ if data['build']:
+ er_status = "āœ…" if data['er_info']['meets_requirement'] else "āš ļø"
+ print(f" {char}: {data['average_score']:.1f}/100 {er_status}")
+ else:
+ print(f" {char}: No build found āŒ")
+ else:
+ print("āŒ No successful builds found!")
+
+ # Comparison analysis
+ print(f"\n{'='*80}")
+ print("šŸ“Š SCENARIO COMPARISON")
+ print(f"{'='*80}")
+
+ comparison_data = []
+ for scenario_name, results in all_results.items():
+ successful = sum(1 for data in results.values() if data['build'] is not None)
+ if successful > 0:
+ avg_score = sum(data['average_score'] for data in results.values() if data['build']) / successful
+ er_compliant = sum(1 for data in results.values()
+ if data['build'] and data['er_info']['meets_requirement'])
+
+ comparison_data.append({
+ 'scenario': scenario_name,
+ 'successful_builds': successful,
+ 'average_score': avg_score,
+ 'er_compliant': er_compliant,
+ 'total_score': successful * avg_score # Weighted score
+ })
+
+ # Sort by total weighted score
+ comparison_data.sort(key=lambda x: x['total_score'], reverse=True)
+
+ print("šŸ† Ranking (by total weighted score):")
+ for i, data in enumerate(comparison_data, 1):
+ print(f" {i}. {data['scenario']}")
+ print(f" Builds: {data['successful_builds']}/4 | "
+ f"Avg Score: {data['average_score']:.1f} | "
+ f"ER Compliant: {data['er_compliant']}/4 | "
+ f"Total: {data['total_score']:.1f}")
+
+ # Character-specific analysis
+ print(f"\nšŸ“ˆ CHARACTER PERFORMANCE ACROSS SCENARIOS")
+ print(f"{'='*60}")
+
+ characters = list(optimizer.character_data.keys())
+ for char in characters:
+ print(f"\nšŸ” {char}:")
+ char_scores = []
+ for scenario_name, results in all_results.items():
+ if results[char]['build']:
+ score = results[char]['average_score']
+ er_ok = results[char]['er_info']['meets_requirement']
+ char_scores.append(score)
+ status = "āœ…" if er_ok else "āš ļø"
+ print(f" {scenario_name}: {score:.1f}/100 {status}")
+ else:
+ print(f" {scenario_name}: No build āŒ")
+
+ if char_scores:
+ print(f" šŸ“Š Score range: {min(char_scores):.1f} - {max(char_scores):.1f}")
+ print(f" šŸ“Š Average: {sum(char_scores)/len(char_scores):.1f}")
+
+ # Priority impact analysis
+ print(f"\nšŸŽÆ PRIORITY IMPACT ANALYSIS")
+ print(f"{'='*60}")
+
+ print("Key Findings:")
+
+ # Find which characters benefit most from high priority
+ char_variance = {}
+ for char in characters:
+ scores = []
+ for results in all_results.values():
+ if results[char]['build']:
+ scores.append(results[char]['average_score'])
+
+ if len(scores) > 1:
+ variance = max(scores) - min(scores)
+ char_variance[char] = variance
+
+ if char_variance:
+ most_affected = max(char_variance.items(), key=lambda x: x[1])
+ least_affected = min(char_variance.items(), key=lambda x: x[1])
+
+ print(f"• Most affected by priority: {most_affected[0]} (variance: {most_affected[1]:.1f})")
+ print(f"• Least affected by priority: {least_affected[0]} (variance: {least_affected[1]:.1f})")
+
+ # Save detailed results
+ with open('demo_results.json', 'w') as f:
+ json.dump(all_results, f, indent=2, default=str)
+
+ print(f"\nšŸ’¾ Detailed results saved to 'demo_results.json'")
+ print(f"\nšŸŽ‰ Demo completed! Try different scenarios to see how priority affects builds.")
+
+if __name__ == "__main__":
+ run_demo() \ No newline at end of file