aboutsummaryrefslogtreecommitdiff
path: root/demo_optimizer.py
blob: 48ea7769f93022b53b11d8a5ff5cc3087ecc2a16 (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
import json
from artifact_optimizer import ArtifactOptimizer

def _summarize(results):
    """Summarize one scenario's optimization results.

    Args:
        results: mapping of character name -> result dict with keys
            'build', 'average_score', and 'er_info' (as produced by
            ``ArtifactOptimizer.optimize_builds``).

    Returns:
        Tuple ``(successful, avg_score, er_compliant)`` where
        ``avg_score`` is 0.0 when no builds succeeded.
    """
    # Consistently treat "has a build" as `build is not None` — the original
    # summary mixed `is not None` with bare truthiness, which could miscount
    # if a build were falsy but not None.
    built = [data for data in results.values() if data['build'] is not None]
    successful = len(built)
    avg_score = (
        sum(data['average_score'] for data in built) / successful
        if successful else 0.0
    )
    er_compliant = sum(
        1 for data in built if data['er_info']['meets_requirement']
    )
    return successful, avg_score, er_compliant


def run_demo():
    """Demonstrate the artifact optimizer with different priority scenarios.

    Loads artifact/character data, runs the optimizer under several
    character-priority orderings, prints per-scenario and per-character
    summaries plus a cross-scenario comparison, and saves the raw results
    to 'demo_results.json'.
    """
    # Initialize optimizer with the demo data files.
    optimizer = ArtifactOptimizer()
    optimizer.load_data('data.json', 'characters.json')

    print("="*80)
    print("🎮 GENSHIN ARTIFACT OPTIMIZER DEMO")
    print("="*80)
    print(f"📦 Total artifacts available: {len(optimizer.artifacts)}")
    print(f"👥 Characters available: {', '.join(optimizer.character_data.keys())}")

    # Number of characters on the roster; used for "x/N" summary lines
    # (previously hard-coded as 4).
    n_chars = len(optimizer.character_data)

    # Define different priority scenarios
    scenarios = [
        {
            "name": "DPS Priority",
            "description": "Prioritize main DPS characters first",
            "priority": ["Fischl", "Chiori", "Furina", "Escoffier"]
        },
        {
            "name": "Support Priority", 
            "description": "Prioritize support characters first",
            "priority": ["Furina", "Escoffier", "Fischl", "Chiori"]
        },
        {
            "name": "ER Demanding First",
            "description": "Prioritize characters with high ER requirements",
            "priority": ["Escoffier", "Furina", "Fischl", "Chiori"]
        },
        {
            "name": "Reverse Order",
            "description": "Completely reverse priority order",
            "priority": ["Chiori", "Fischl", "Escoffier", "Furina"]
        }
    ]

    # Store results for cross-scenario comparison
    all_results = {}

    # Run each scenario
    for i, scenario in enumerate(scenarios, 1):
        print(f"\n{'='*60}")
        print(f"SCENARIO {i}: {scenario['name']}")
        print(f"{'='*60}")
        print(f"📝 {scenario['description']}")
        print(f"🎯 Priority: {' -> '.join(scenario['priority'])}")

        # Run optimization
        results = optimizer.optimize_builds(scenario['priority'])
        all_results[scenario['name']] = results

        # Quick summary
        successful, avg_score, er_compliant = _summarize(results)
        if successful > 0:
            print(f"✅ Successful builds: {successful}/{n_chars}")
            print(f"📊 Average quality: {avg_score:.1f}/100")
            print(f"⚡ ER compliant: {er_compliant}/{n_chars}")

            # Show individual scores
            print("\n📋 Individual Scores:")
            for char, data in results.items():
                if data['build']:
                    er_status = "✅" if data['er_info']['meets_requirement'] else "⚠️"
                    print(f"   {char}: {data['average_score']:.1f}/100 {er_status}")
                else:
                    print(f"   {char}: No build found ❌")
        else:
            print("❌ No successful builds found!")

    # Comparison analysis
    print(f"\n{'='*80}")
    print("📊 SCENARIO COMPARISON")
    print(f"{'='*80}")

    comparison_data = []
    for scenario_name, results in all_results.items():
        successful, avg_score, er_compliant = _summarize(results)
        if successful > 0:
            comparison_data.append({
                'scenario': scenario_name,
                'successful_builds': successful,
                'average_score': avg_score,
                'er_compliant': er_compliant,
                'total_score': successful * avg_score  # quality weighted by build count
            })

    # Sort by total weighted score, best first
    comparison_data.sort(key=lambda x: x['total_score'], reverse=True)

    print("🏆 Ranking (by total weighted score):")
    for i, data in enumerate(comparison_data, 1):
        print(f"   {i}. {data['scenario']}")
        print(f"      Builds: {data['successful_builds']}/{n_chars} | "
              f"Avg Score: {data['average_score']:.1f} | "
              f"ER Compliant: {data['er_compliant']}/{n_chars} | "
              f"Total: {data['total_score']:.1f}")

    # Character-specific analysis: how each character fared across scenarios
    print("\n📈 CHARACTER PERFORMANCE ACROSS SCENARIOS")
    print(f"{'='*60}")

    characters = list(optimizer.character_data.keys())
    for char in characters:
        print(f"\n🔍 {char}:")
        char_scores = []
        for scenario_name, results in all_results.items():
            if results[char]['build']:
                score = results[char]['average_score']
                er_ok = results[char]['er_info']['meets_requirement']
                char_scores.append(score)
                status = "✅" if er_ok else "⚠️"
                print(f"   {scenario_name}: {score:.1f}/100 {status}")
            else:
                print(f"   {scenario_name}: No build ❌")

        if char_scores:
            print(f"   📊 Score range: {min(char_scores):.1f} - {max(char_scores):.1f}")
            print(f"   📊 Average: {sum(char_scores)/len(char_scores):.1f}")

    # Priority impact analysis
    print("\n🎯 PRIORITY IMPACT ANALYSIS")
    print(f"{'='*60}")

    print("Key Findings:")

    # Score spread (max - min) per character across scenarios: a large spread
    # means the character's build quality depends heavily on priority order.
    char_variance = {}
    for char in characters:
        scores = [
            results[char]['average_score']
            for results in all_results.values()
            if results[char]['build']
        ]
        if len(scores) > 1:
            char_variance[char] = max(scores) - min(scores)

    if char_variance:
        most_affected = max(char_variance.items(), key=lambda x: x[1])
        least_affected = min(char_variance.items(), key=lambda x: x[1])

        print(f"• Most affected by priority: {most_affected[0]} (variance: {most_affected[1]:.1f})")
        print(f"• Least affected by priority: {least_affected[0]} (variance: {least_affected[1]:.1f})")

    # Save detailed results. default=str stringifies any non-JSON-serializable
    # values in the result dicts (deliberate best-effort for a demo dump).
    with open('demo_results.json', 'w', encoding='utf-8') as f:
        json.dump(all_results, f, indent=2, default=str)

    print("\n💾 Detailed results saved to 'demo_results.json'")
    print("\n🎉 Demo completed! Try different scenarios to see how priority affects builds.")

# Run the demo only when executed as a script, not when imported.
if __name__ == "__main__":
    run_demo()