"""
Quick validation tests for app.py improvements
Tests the new functions without launching the full Gradio app
"""
import os
import sys

import numpy as np
import pandas as pd

# Path configuration: make the repo root importable so app.py can be found
REPO_ROOT = '/Users/dennissinden/GradioApp/TempoPFN'
try:
    sys.path.insert(0, REPO_ROOT)
    print("βœ“ Python path configured")
except Exception as e:
    print(f"βœ— Path configuration failed: {e}")
    sys.exit(1)
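
# Hedged sanity check: warn early if app.py is not where this script assumes it
# lives (the hard-coded REPO_ROOT above), so the later tests fail with context.
if not os.path.isfile(os.path.join(REPO_ROOT, 'app.py')):
    print("! app.py not found under REPO_ROOT; the syntax test below will likely fail")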

# Test metrics calculation logic (standalone)
def test_metrics_calculation():
    """Test the metrics calculation with sample data"""
    print("\n=== Testing Metrics Calculation ===")

    # Create sample data
    np.random.seed(42)
    history = np.random.randn(100, 1) * 10 + 50
    predictions = np.random.randn(20, 1) * 10 + 50
    future = np.random.randn(20, 1) * 10 + 50  # ground-truth future; mirrors the app's shapes, unused in the metrics below

    # Simulate the calculate_metrics function logic
    try:
        from scipy import stats as scipy_stats

        metrics = {}
        metrics['data_mean'] = float(np.mean(history))
        metrics['data_std'] = float(np.std(history))
        metrics['latest_price'] = float(history[-1, 0])
        metrics['forecast_next'] = float(predictions[0, 0])

        print(f"βœ“ Mean: {metrics['data_mean']:.2f}")
        print(f"βœ“ Std: {metrics['data_std']:.2f}")
        print(f"βœ“ Latest: {metrics['latest_price']:.2f}")
        print(f"βœ“ Forecast: {metrics['forecast_next']:.2f}")

        return True
    except Exception as e:
        print(f"βœ— Metrics calculation failed: {e}")
        return False

# Test export functionality logic
def test_export_logic():
    """Test export CSV logic"""
    print("\n=== Testing Export Logic ===")

    try:
        # Simulate forecast results
        forecast_results = {
            'history': np.random.randn(100, 1),
            'predictions': np.random.randn(20, 1),
            'future': np.random.randn(20, 1)
        }

        history = forecast_results['history'].flatten()
        predictions = forecast_results['predictions'].flatten()
        future = forecast_results['future'].flatten()

        # Lay history and forecast out on one shared time index: history fills
        # the first rows, predictions and the true future fill the tail.
        total_len = len(history) + len(predictions)
        df_data = {
            'Time_Index': list(range(total_len)),
            'Historical_Value': list(history) + [np.nan] * len(predictions),
            'Predicted_Value': [np.nan] * len(history) + list(predictions),
            'True_Future_Value': [np.nan] * len(history) + list(future)
        }

        df = pd.DataFrame(df_data)
        print(f"βœ“ DataFrame created with {len(df)} rows")
        print(f"βœ“ Columns: {list(df.columns)}")

        return True
    except Exception as e:
        print(f"βœ— Export logic failed: {e}")
        return False

# Test visualization logic
def test_visualization_logic():
    """Test advanced visualization creation logic"""
    print("\n=== Testing Visualization Logic ===")

    try:
        from plotly.subplots import make_subplots
        import plotly.graph_objects as go

        # Create sample subplots
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=('Test 1', 'Test 2', 'Test 3', 'Test 4')
        )

        # Add sample data
        x = np.arange(10)
        y = np.random.randn(10)

        fig.add_trace(go.Scatter(x=x, y=y, name='Test'), row=1, col=1)

        print("βœ“ Plotly subplots created successfully")
        print("βœ“ Trace added successfully")

        return True
    except Exception as e:
        print(f"βœ— Visualization logic failed: {e}")
        return False

# Test syntax and imports
def test_app_syntax():
    """Test if app.py has valid syntax"""
    print("\n=== Testing App Syntax ===")

    try:
        import py_compile
        # Compile app.py from the configured repo root so the check does not
        # depend on the current working directory.
        py_compile.compile(os.path.join(REPO_ROOT, 'app.py'), doraise=True)
        print("βœ“ app.py syntax is valid")
        return True
    except py_compile.PyCompileError as e:
        print(f"βœ— Syntax error in app.py: {e}")
        return False
    except FileNotFoundError as e:
        print(f"βœ— Could not locate app.py: {e}")
        return False

def main():
    print("=" * 50)
    print("APP IMPROVEMENTS VALIDATION TEST")
    print("=" * 50)

    results = []

    results.append(("Metrics Calculation", test_metrics_calculation()))
    results.append(("Export Logic", test_export_logic()))
    results.append(("Visualization Logic", test_visualization_logic()))
    results.append(("App Syntax", test_app_syntax()))

    print("\n" + "=" * 50)
    print("TEST SUMMARY")
    print("=" * 50)

    for name, passed in results:
        status = "PASS" if passed else "FAIL"
        symbol = "βœ“" if passed else "βœ—"
        print(f"{symbol} {name}: {status}")

    all_passed = all(result[1] for result in results)
    print("\n" + "=" * 50)
    if all_passed:
        print("βœ“ ALL TESTS PASSED")
    else:
        print("βœ— SOME TESTS FAILED")
    print("=" * 50)

    return all_passed

if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)