feat: implement strategy metadata and dashboard simulation panel
- Added display_name and description to BaseStrategy
- Updated MA44 and MA125 strategies with metadata
- Added /api/v1/strategies endpoint for dynamic discovery
- Added Strategy Simulation panel to dashboard with date picker and tooltips
- Implemented JS polling for backtest results in dashboard
- Added performance test scripts and DB connection guide
- Expanded indicator config to all 15 timeframes
This commit is contained in:
107
scripts/check_db_stats.py
Normal file
107
scripts/check_db_stats.py
Normal file
@ -0,0 +1,107 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Quick database statistics checker
|
||||
Shows oldest date, newest date, and count for each interval
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import asyncpg
|
||||
import os
|
||||
from datetime import datetime
|
||||
|
||||
async def check_database_stats():
    """Report candle and indicator coverage for the BTC database.

    Connects using the same DB_* environment variables as the main
    application, then prints per-interval candle statistics, the list of
    computed indicators, and a completeness estimate for 1-minute data.
    """
    # Reuse the application's connection settings from the environment.
    conn = await asyncpg.connect(
        host=os.getenv('DB_HOST', 'localhost'),
        port=int(os.getenv('DB_PORT', 5432)),
        database=os.getenv('DB_NAME', 'btc_data'),
        user=os.getenv('DB_USER', 'btc_bot'),
        password=os.getenv('DB_PASSWORD', '')
    )

    bar = "=" * 70
    try:
        print(bar)
        print("DATABASE STATISTICS")
        print(bar)
        print()

        # One summary row per configured candle interval.
        for tf in ['1m', '3m', '5m', '15m', '30m', '37m', '1h', '2h', '4h', '8h', '12h', '1d']:
            row = await conn.fetchrow("""
                SELECT
                    COUNT(*) as count,
                    MIN(time) as oldest,
                    MAX(time) as newest
                FROM candles
                WHERE symbol = 'BTC' AND interval = $1
            """, tf)

            if row['count'] > 0:
                first_seen = row['oldest'].strftime('%Y-%m-%d %H:%M') if row['oldest'] else 'N/A'
                last_seen = row['newest'].strftime('%Y-%m-%d %H:%M') if row['newest'] else 'N/A'
                total = row['count']

                # Span of available history, in whole days.
                if row['oldest'] and row['newest']:
                    day_span = (row['newest'] - row['oldest']).days
                    print(f"{tf:6} | {total:>8,} candles | {day_span:>4} days | {first_seen} to {last_seen}")

        print()
        print(bar)

        # Which indicators have been computed, per timeframe.
        print("\nINDICATORS AVAILABLE:")
        ind_rows = await conn.fetch("""
            SELECT DISTINCT indicator_name, interval, COUNT(*) as count
            FROM indicators
            WHERE symbol = 'BTC'
            GROUP BY indicator_name, interval
            ORDER BY interval, indicator_name
        """)

        if not ind_rows:
            print(" No indicators found in database")
        else:
            for ind in ind_rows:
                print(f" {ind['indicator_name']:10} on {ind['interval']:6} | {ind['count']:>8,} values")

        print()
        print(bar)

        # Drill into the 1m series to estimate how gap-free it is.
        print("\n1-MINUTE DATA DETAIL:")
        detail = await conn.fetchrow("""
            SELECT
                COUNT(*) as count,
                MIN(time) as oldest,
                MAX(time) as newest,
                COUNT(*) FILTER (WHERE time > NOW() - INTERVAL '24 hours') as last_24h
            FROM candles
            WHERE symbol = 'BTC' AND interval = '1m'
        """)

        if detail['count'] > 0:
            span_days = (detail['newest'] - detail['oldest']).days
            expected = span_days * 24 * 60  # 1 candle per minute
            actual = detail['count']
            coverage = (actual / expected) * 100 if expected > 0 else 0

            print(f" Total candles: {actual:,}")
            print(f" Date range: {detail['oldest'].strftime('%Y-%m-%d')} to {detail['newest'].strftime('%Y-%m-%d')}")
            print(f" Total days: {span_days}")
            print(f" Expected candles: {expected:,} (if complete)")
            print(f" Coverage: {coverage:.1f}%")
            print(f" Last 24 hours: {detail['last_24h']:,} candles")
        else:
            print(" No 1m data found")

        print()
        print(bar)

    finally:
        await conn.close()
|
||||
|
||||
# Allow running the checker directly as a script.
if __name__ == "__main__":
    asyncio.run(check_database_stats())
|
||||
18
scripts/check_status.sh
Normal file
18
scripts/check_status.sh
Normal file
@ -0,0 +1,18 @@
|
||||
#!/bin/bash
# Check the status of the indicators table (constraints and compression)
#
# Read-only diagnostic: pipes a report query into psql inside the running
# btc_timescale container and prints the results to stdout.

# \x enables expanded display so the wide hypertable/job rows stay readable.
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
\x
SELECT 'Checking constraints...' as step;
SELECT conname, pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conrelid = 'indicators'::regclass;

SELECT 'Checking compression settings...' as step;
SELECT * FROM timescaledb_information.hypertables
WHERE hypertable_name = 'indicators';

SELECT 'Checking compression jobs...' as step;
SELECT * FROM timescaledb_information.jobs
WHERE hypertable_name = 'indicators';
EOF
|
||||
54
scripts/fix_indicators_v2.sh
Normal file
54
scripts/fix_indicators_v2.sh
Normal file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
# Fix indicators table schema - Version 2 (Final)
# Handles TimescaleDB compression constraints properly
#
# TimescaleDB refuses to add a unique constraint while a hypertable has
# compressed chunks, so this script decompresses them first, then rebuilds
# the constraint and re-enables compression in one transaction.

echo "Fixing indicators table schema (v2)..."

# 1. Decompress chunks individually (safest method)
# We fetch the list of compressed chunks and process them one by one
echo "Checking for compressed chunks..."
# psql -t gives tuples-only output: one fully-qualified chunk name per line.
CHUNKS=$(docker exec -i btc_timescale psql -U btc_bot -d btc_data -t -c "SELECT chunk_schema || '.' || chunk_name FROM timescaledb_information.chunks WHERE hypertable_name = 'indicators' AND is_compressed = true;")

for chunk in $CHUNKS; do
    # Trim whitespace
    chunk=$(echo "$chunk" | xargs)
    # Skip the blank tokens psql's padded output can produce.
    if [[ ! -z "$chunk" ]]; then
        echo "Decompressing chunk: $chunk"
        docker exec -i btc_timescale psql -U btc_bot -d btc_data -c "SELECT decompress_chunk('$chunk');"
    fi
done

# 2. Execute the schema changes
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
BEGIN;

-- Remove policy first
SELECT remove_compression_policy('indicators', if_exists => true);

-- Disable compression setting (REQUIRED to add unique constraint)
ALTER TABLE indicators SET (timescaledb.compress = false);

-- Deduplicate data (just in case duplicates exist)
DELETE FROM indicators a USING indicators b
WHERE a.ctid < b.ctid
AND a.time = b.time
AND a.symbol = b.symbol
AND a.interval = b.interval
AND a.indicator_name = b.indicator_name;

-- Add the unique constraint
ALTER TABLE indicators ADD CONSTRAINT indicators_unique UNIQUE (time, symbol, interval, indicator_name);

-- Re-enable compression configuration
ALTER TABLE indicators SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'symbol,interval,indicator_name'
);

-- Re-add compression policy (7 days)
SELECT add_compression_policy('indicators', INTERVAL '7 days', if_not_exists => true);

COMMIT;

SELECT 'Indicators schema fix v2 completed successfully' as status;
EOF
|
||||
11
scripts/run_test.sh
Normal file
11
scripts/run_test.sh
Normal file
@ -0,0 +1,11 @@
|
||||
#!/bin/bash
# Run performance test inside Docker container
# Usage: ./run_test.sh [days] [interval]
#
# Arguments:
#   days     - number of days to backtest (default: 7)
#   interval - candle interval to use (default: 1m)

DAYS=${1:-7}
INTERVAL=${2:-1m}

echo "Running MA44 performance test: ${DAYS} days of ${INTERVAL} data"
echo "=================================================="

# Quote the expansions so unexpected whitespace or glob characters in the
# arguments are passed through verbatim instead of being word-split.
docker exec btc_collector python scripts/test_ma44_performance.py --days "$DAYS" --interval "$INTERVAL"
|
||||
187
scripts/test_ma44_performance.py
Normal file
187
scripts/test_ma44_performance.py
Normal file
@ -0,0 +1,187 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Performance Test Script for MA44 Strategy
|
||||
Tests backtesting performance on Synology DS218+ with 6GB RAM
|
||||
|
||||
Usage:
|
||||
python test_ma44_performance.py [--days DAYS] [--interval INTERVAL]
|
||||
|
||||
Example:
|
||||
python test_ma44_performance.py --days 7 --interval 1m
|
||||
"""
|
||||
|
||||
import asyncio
|
||||
import argparse
|
||||
import time
|
||||
import sys
|
||||
import os
|
||||
from datetime import datetime, timedelta, timezone
|
||||
|
||||
# Add src to path
|
||||
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
|
||||
|
||||
from data_collector.database import DatabaseManager
|
||||
from data_collector.indicator_engine import IndicatorEngine, IndicatorConfig
|
||||
from data_collector.brain import Brain
|
||||
from data_collector.backtester import Backtester
|
||||
|
||||
|
||||
def interval_to_minutes(interval: str) -> int:
    """Return how many minutes one candle of *interval* covers.

    Supports minute ('15m'), hour ('4h') and day ('1d') suffixes.

    Raises:
        ValueError: if the interval does not end in m/h/d or has a
            non-numeric magnitude.
    """
    unit_minutes = {'m': 1, 'h': 60, 'd': 1440}
    unit = interval[-1]
    if unit not in unit_minutes:
        raise ValueError(f"Unsupported interval format: {interval}")
    return int(interval[:-1]) * unit_minutes[unit]


async def run_performance_test(days: int = 7, interval: str = "1m"):
    """Run MA44 backtest and measure performance.

    Args:
        days: number of days of history to backtest.
        interval: candle interval to backtest (e.g. '1m', '15m', '1h').

    Connects to the database, verifies candle availability, runs the MA44
    backtest, then prints the stored results plus timing diagnostics.
    """

    print("=" * 70)
    print("PERFORMANCE TEST: MA44 Strategy")
    print(f"Timeframe: {interval}")
    print(f"Period: Last {days} days")
    print("Hardware: Synology DS218+ (6GB RAM)")
    print("=" * 70)
    print()

    # Database connection (adjust these if needed)
    db = DatabaseManager(
        host=os.getenv('DB_HOST', 'localhost'),
        port=int(os.getenv('DB_PORT', 5432)),
        database=os.getenv('DB_NAME', 'btc_data'),
        user=os.getenv('DB_USER', 'btc_bot'),
        password=os.getenv('DB_PASSWORD', '')
    )

    try:
        await db.connect()
        print("✓ Database connected")

        # Calculate date range (timezone-aware, UTC).
        end_date = datetime.now(timezone.utc)
        start_date = end_date - timedelta(days=days)

        print(f"✓ Date range: {start_date.date()} to {end_date.date()}")
        print("✓ Symbol: BTC")
        print("✓ Strategy: MA44 (44-period SMA)")
        print()

        # Check data availability before paying for a full backtest run.
        async with db.acquire() as conn:
            count = await conn.fetchval("""
                SELECT COUNT(*) FROM candles
                WHERE symbol = 'BTC'
                AND interval = $1
                AND time >= $2
                AND time <= $3
            """, interval, start_date, end_date)

        print(f"📊 Data points: {count:,} {interval} candles")

        if count == 0:
            print("❌ ERROR: No data found for this period!")
            print(f" Run: python -m data_collector.backfill --days {days} --intervals {interval}")
            return

        # BUG FIX: the previous expression stripped the unit letter and used
        # the bare number as minutes, so '1h' counted as 1 minute per candle
        # and '1d' as 1. interval_to_minutes converts the unit properly.
        print(f" (Expected: ~{count * interval_to_minutes(interval)} minutes of data)")
        print()

        # Setup indicator configuration: a single 44-period SMA on the
        # requested timeframe.
        indicator_configs = [
            IndicatorConfig("ma44", "sma", 44, [interval])
        ]

        engine = IndicatorEngine(db, indicator_configs)
        brain = Brain(db, engine)
        backtester = Backtester(db, engine, brain)

        print("⚙️ Running backtest...")
        print("-" * 70)

        # Measure wall-clock execution time of the backtest itself.
        start_time = time.time()
        await backtester.run("BTC", [interval], start_date, end_date)
        execution_time = time.time() - start_time

        print("-" * 70)
        print()

        # Fetch the most recent MA44 run that the backtester persisted.
        async with db.acquire() as conn:
            latest_backtest = await conn.fetchrow("""
                SELECT id, strategy, start_time, end_time, intervals, results, created_at
                FROM backtest_runs
                WHERE strategy LIKE '%ma44%'
                ORDER BY created_at DESC
                LIMIT 1
            """)

        if latest_backtest and latest_backtest['results']:
            import json
            results = json.loads(latest_backtest['results'])

            print("📈 RESULTS:")
            print("=" * 70)
            print(f" Total Trades: {results.get('total_trades', 'N/A')}")
            print(f" Win Rate: {results.get('win_rate', 0):.1f}%")
            print(f" Win Count: {results.get('win_count', 0)}")
            print(f" Loss Count: {results.get('loss_count', 0)}")
            print(f" Total P&L: ${results.get('total_pnl', 0):.2f}")
            print(f" P&L Percent: {results.get('total_pnl_pct', 0):.2f}%")
            print(f" Initial Balance: ${results.get('initial_balance', 1000):.2f}")
            print(f" Final Balance: ${results.get('final_balance', 1000):.2f}")
            print(f" Max Drawdown: {results.get('max_drawdown', 0):.2f}%")
            print()
            print("⏱️ PERFORMANCE:")
            print(f" Execution Time: {execution_time:.2f} seconds")
            # Guard against a zero elapsed time on very small datasets.
            if execution_time > 0:
                print(f" Candles/Second: {count / execution_time:.0f}")
            print(f" Backtest ID: {latest_backtest['id']}")
            print()

            # Performance assessment against rough hardware expectations.
            if execution_time < 30:
                print("✅ PERFORMANCE: Excellent (< 30s)")
            elif execution_time < 60:
                print("✅ PERFORMANCE: Good (< 60s)")
            elif execution_time < 300:
                print("⚠️ PERFORMANCE: Acceptable (1-5 min)")
            else:
                print("❌ PERFORMANCE: Slow (> 5 min) - Consider shorter periods or higher TFs")

            print()
            print("💡 RECOMMENDATIONS:")
            if execution_time > 60:
                print(" • For faster results, use higher timeframes (15m, 1h, 4h)")
                print(" • Or reduce date range (< 7 days)")
            else:
                print(" • Hardware is sufficient for this workload")
                print(" • Can handle larger date ranges or multiple timeframes")

        else:
            print("❌ ERROR: No results found in database!")
            print(" The backtest may have failed. Check server logs.")

    except Exception as e:
        # Surface the failure with a traceback; this is a diagnostic script,
        # so printing beats re-raising.
        print(f"\n❌ ERROR: {e}")
        import traceback
        traceback.print_exc()

    finally:
        await db.disconnect()
        print()
        print("=" * 70)
        print("Test completed")
        print("=" * 70)
|
||||
|
||||
|
||||
def main():
    """Parse command-line options and launch the performance test."""
    cli = argparse.ArgumentParser(description='Test MA44 backtest performance')
    cli.add_argument('--days', type=int, default=7,
                     help='Number of days to backtest (default: 7)')
    cli.add_argument('--interval', type=str, default='1m',
                     help='Candle interval (default: 1m)')
    opts = cli.parse_args()

    # Hand the parsed options to the async entry point.
    asyncio.run(run_performance_test(opts.days, opts.interval))
|
||||
|
||||
|
||||
# Script entry point.
if __name__ == "__main__":
    main()
|
||||
87
scripts/update_schema.sh
Normal file
87
scripts/update_schema.sh
Normal file
@ -0,0 +1,87 @@
|
||||
#!/bin/bash
# Apply schema updates to a running TimescaleDB container without wiping data
#
# Intended to be idempotent: statements use IF NOT EXISTS / OR REPLACE or
# swallow "already exists" errors, so re-running the script is safe.

echo "Applying schema updates to btc_timescale container..."

# Execute the schema SQL inside the container
# We use psql with the environment variables set in docker-compose
# NOTE: \$\$ is escaped so bash passes a literal $$ (dollar-quoting) to psql.
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
-- 1. Unique constraint for indicators (if not exists)
DO \$\$
BEGIN
    IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'indicators_unique') THEN
        ALTER TABLE indicators ADD CONSTRAINT indicators_unique UNIQUE (time, symbol, interval, indicator_name);
    END IF;
END \$\$;

-- 2. Index for indicators
CREATE INDEX IF NOT EXISTS idx_indicators_lookup ON indicators (symbol, interval, indicator_name, time DESC);

-- 3. Data health view update
CREATE OR REPLACE VIEW data_health AS
SELECT
    symbol,
    COUNT(*) as total_candles,
    COUNT(*) FILTER (WHERE validated) as validated_candles,
    MAX(time) as latest_candle,
    MIN(time) as earliest_candle,
    NOW() - MAX(time) as time_since_last
FROM candles
GROUP BY symbol;

-- 4. Decisions table
CREATE TABLE IF NOT EXISTS decisions (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,
    decision_type TEXT NOT NULL,
    strategy TEXT NOT NULL,
    confidence DECIMAL(5,4),
    price_at_decision DECIMAL(18,8),
    indicator_snapshot JSONB NOT NULL,
    candle_snapshot JSONB NOT NULL,
    reasoning TEXT,
    backtest_id TEXT,
    executed BOOLEAN DEFAULT FALSE,
    execution_price DECIMAL(18,8),
    execution_time TIMESTAMPTZ,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 5. Decisions hypertable (ignore error if already exists)
DO \$\$
BEGIN
    PERFORM create_hypertable('decisions', 'time', chunk_time_interval => INTERVAL '7 days', if_not_exists => TRUE);
EXCEPTION WHEN OTHERS THEN
    NULL; -- Ignore if already hypertable
END \$\$;

-- 6. Decisions indexes
CREATE INDEX IF NOT EXISTS idx_decisions_live ON decisions (symbol, interval, time DESC) WHERE backtest_id IS NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_backtest ON decisions (backtest_id, symbol, time DESC) WHERE backtest_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_type ON decisions (symbol, decision_type, time DESC);

-- 7. Backtest runs table
CREATE TABLE IF NOT EXISTS backtest_runs (
    id TEXT PRIMARY KEY,
    strategy TEXT NOT NULL,
    symbol TEXT NOT NULL DEFAULT 'BTC',
    start_time TIMESTAMPTZ NOT NULL,
    end_time TIMESTAMPTZ NOT NULL,
    intervals TEXT[] NOT NULL,
    config JSONB,
    results JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 8. Compression policies
DO \$\$
BEGIN
    ALTER TABLE decisions SET (timescaledb.compress, timescaledb.compress_segmentby = 'symbol,interval,strategy');
    PERFORM add_compression_policy('decisions', INTERVAL '7 days', if_not_exists => TRUE);
EXCEPTION WHEN OTHERS THEN
    NULL; -- Ignore compression errors if already set
END \$\$;

SELECT 'Schema update completed successfully' as status;
EOF
|
||||
Reference in New Issue
Block a user