docs: add DOCKER_GUIDE.md and fix .env parsing; chore: update docker and script configurations

This commit is contained in:
BTC Bot
2026-03-05 08:20:18 +01:00
parent e41afcf005
commit 30aeda0901
25 changed files with 1806 additions and 0 deletions

36
scripts/backfill.sh Normal file
View File

@ -0,0 +1,36 @@
#!/bin/bash
# Backfill script for Hyperliquid historical data
# Usage: ./backfill.sh [coin] [days|max] [intervals...]
# Examples:
# ./backfill.sh BTC 7 "1m" # Last 7 days of 1m candles
# ./backfill.sh BTC max "1m 1h 1d" # Maximum available data for all intervals
# Strict mode: abort on errors, unset variables, and pipeline failures
# (the original 'set -e' alone misses the latter two).
set -euo pipefail

# Positional parameters with defaults
COIN=${1:-BTC}
DAYS=${2:-7}
INTERVALS=${3:-"1m"}

echo "=== Hyperliquid Historical Data Backfill ==="
echo "Coin: $COIN"
if [ "$DAYS" == "max" ]; then
  echo "Mode: MAXIMUM (up to 5000 candles per interval)"
else
  echo "Days: $DAYS"
fi
echo "Intervals: $INTERVALS"
echo ""

# Change to project root (script may be invoked from anywhere)
cd "$(dirname "$0")/.."

# Run backfill inside Docker container.
# NOTE: $INTERVALS is intentionally unquoted so a value like "1m 1h 1d"
# word-splits into multiple --intervals arguments.
docker exec btc_collector python -m src.data_collector.backfill \
  --coin "$COIN" \
  --days "$DAYS" \
  --intervals $INTERVALS \
  --db-host localhost \
  --db-port 5433
echo ""
echo "=== Backfill Complete ==="

37
scripts/backup.sh Normal file
View File

@ -0,0 +1,37 @@
#!/bin/bash
# Backup script for Synology DS218+
# Run via Task Scheduler every 6 hours.
# Dumps the btc_data database (pg_dump custom format), gzips it, and
# prunes backups older than RETENTION_DAYS.
# -u: fail on unset vars; pipefail: catch failures inside pipelines.
# Deliberately no -e so the explicit file check below can report the error.
set -uo pipefail

BACKUP_DIR="/volume1/btc_bot/backups"
DB_NAME="btc_data"
DB_USER="btc_bot"
RETENTION_DAYS=30
DATE=$(date +%Y%m%d_%H%M)

echo "Starting backup at $(date)"

# Create backup directory if not exists
mkdir -p "$BACKUP_DIR"

# Create backup (custom format, restorable with pg_restore)
docker exec btc_timescale pg_dump -U "$DB_USER" -Fc "$DB_NAME" > "$BACKUP_DIR/btc_data_$DATE.dump"

# Compress. FIX: test with -s (exists AND non-empty) instead of -f —
# a failed pg_dump still leaves a zero-byte file behind, which the old
# -f check silently accepted as a valid backup.
if [ -s "$BACKUP_DIR/btc_data_$DATE.dump" ]; then
  gzip "$BACKUP_DIR/btc_data_$DATE.dump"
  echo "Backup created: btc_data_$DATE.dump.gz"
  # Calculate size for the log
  SIZE=$(du -h "$BACKUP_DIR/btc_data_$DATE.dump.gz" | cut -f1)
  echo "Backup size: $SIZE"
else
  echo "Error: Backup file not created"
  exit 1
fi

# Delete backups older than the retention window (count first for the log)
DELETED=$(find "$BACKUP_DIR" -name "*.dump.gz" -mtime +"$RETENTION_DAYS" | wc -l)
find "$BACKUP_DIR" -name "*.dump.gz" -mtime +"$RETENTION_DAYS" -delete
echo "Deleted $DELETED old backup(s)"
echo "Backup completed at $(date)"

107
scripts/check_db_stats.py Normal file
View File

@ -0,0 +1,107 @@
#!/usr/bin/env python3
"""
Quick database statistics checker
Shows oldest date, newest date, and count for each interval
"""
import asyncio
import asyncpg
import os
from datetime import datetime
async def check_database_stats():
    """Print summary statistics for the candles and indicators tables.

    Connects via asyncpg using the same DB_* environment variables as the
    main application, then reports:
      * per-interval candle counts and date ranges for BTC,
      * which indicators exist per interval,
      * a detailed coverage breakdown for the 1m data.
    """
    # Database connection (uses same env vars as your app)
    conn = await asyncpg.connect(
        host=os.getenv('DB_HOST', 'localhost'),
        port=int(os.getenv('DB_PORT', 5432)),
        database=os.getenv('DB_NAME', 'btc_data'),
        user=os.getenv('DB_USER', 'btc_bot'),
        password=os.getenv('DB_PASSWORD', '')
    )
    try:
        print("=" * 70)
        print("DATABASE STATISTICS")
        print("=" * 70)
        print()
        # Check for each interval (includes custom timeframes such as 37m)
        intervals = ['1m', '3m', '5m', '15m', '30m', '37m', '1h', '2h', '4h', '8h', '12h', '1d']
        for interval in intervals:
            stats = await conn.fetchrow("""
                SELECT
                    COUNT(*) as count,
                    MIN(time) as oldest,
                    MAX(time) as newest
                FROM candles
                WHERE symbol = 'BTC' AND interval = $1
            """, interval)
            if stats['count'] > 0:
                # Defensive 'N/A' fallback, though count > 0 implies both exist
                oldest = stats['oldest'].strftime('%Y-%m-%d %H:%M') if stats['oldest'] else 'N/A'
                newest = stats['newest'].strftime('%Y-%m-%d %H:%M') if stats['newest'] else 'N/A'
                count = stats['count']
                # Calculate days of data covered by this interval
                if stats['oldest'] and stats['newest']:
                    days = (stats['newest'] - stats['oldest']).days
                    print(f"{interval:6} | {count:>8,} candles | {days:>4} days | {oldest} to {newest}")
        print()
        print("=" * 70)
        # Check which indicators have been computed, per interval
        print("\nINDICATORS AVAILABLE:")
        indicators = await conn.fetch("""
            SELECT DISTINCT indicator_name, interval, COUNT(*) as count
            FROM indicators
            WHERE symbol = 'BTC'
            GROUP BY indicator_name, interval
            ORDER BY interval, indicator_name
        """)
        if indicators:
            for ind in indicators:
                print(f" {ind['indicator_name']:10} on {ind['interval']:6} | {ind['count']:>8,} values")
        else:
            print(" No indicators found in database")
        print()
        print("=" * 70)
        # Check 1m specifically with more detail (coverage vs. expected)
        print("\n1-MINUTE DATA DETAIL:")
        one_min_stats = await conn.fetchrow("""
            SELECT
                COUNT(*) as count,
                MIN(time) as oldest,
                MAX(time) as newest,
                COUNT(*) FILTER (WHERE time > NOW() - INTERVAL '24 hours') as last_24h
            FROM candles
            WHERE symbol = 'BTC' AND interval = '1m'
        """)
        if one_min_stats['count'] > 0:
            total_days = (one_min_stats['newest'] - one_min_stats['oldest']).days
            expected_candles = total_days * 24 * 60  # 1 candle per minute
            actual_candles = one_min_stats['count']
            # Percentage of expected 1m candles actually stored
            coverage = (actual_candles / expected_candles) * 100 if expected_candles > 0 else 0
            print(f" Total candles: {actual_candles:,}")
            print(f" Date range: {one_min_stats['oldest'].strftime('%Y-%m-%d')} to {one_min_stats['newest'].strftime('%Y-%m-%d')}")
            print(f" Total days: {total_days}")
            print(f" Expected candles: {expected_candles:,} (if complete)")
            print(f" Coverage: {coverage:.1f}%")
            print(f" Last 24 hours: {one_min_stats['last_24h']:,} candles")
        else:
            print(" No 1m data found")
        print()
        print("=" * 70)
    finally:
        # Always release the connection, even if a query fails
        await conn.close()


if __name__ == "__main__":
    asyncio.run(check_database_stats())

18
scripts/check_status.sh Normal file
View File

@ -0,0 +1,18 @@
#!/bin/bash
# Check the status of the indicators table (constraints and compression)
# Runs three read-only diagnostic queries inside the btc_timescale container:
#   1. constraints currently defined on the indicators table
#   2. TimescaleDB hypertable metadata (incl. compression settings)
#   3. background jobs (e.g. compression policy) attached to the table
# \x turns on psql expanded output so wide rows stay readable.
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
\x
SELECT 'Checking constraints...' as step;
SELECT conname, pg_get_constraintdef(oid)
FROM pg_constraint
WHERE conrelid = 'indicators'::regclass;
SELECT 'Checking compression settings...' as step;
SELECT * FROM timescaledb_information.hypertables
WHERE hypertable_name = 'indicators';
SELECT 'Checking compression jobs...' as step;
SELECT * FROM timescaledb_information.jobs
WHERE hypertable_name = 'indicators';
EOF

59
scripts/deploy.sh Normal file
View File

@ -0,0 +1,59 @@
#!/bin/bash
# Deployment script for Synology DS218+
# Builds and starts the docker-compose stack and prepares /volume1 dirs.
set -e

echo "=== BTC Bot Data Collector Deployment ==="
echo ""

# FIX: run from the project root regardless of invocation directory,
# consistent with scripts/backfill.sh. The original script silently
# assumed the caller's cwd for '.env.example' and 'cd docker' below.
cd "$(dirname "$0")/.."

# Check if running on Synology
if [ ! -d "/volume1" ]; then
  echo "Warning: This script is designed for Synology NAS"
  echo "Continuing anyway..."
fi

# Create data/backup/log/export directories
echo "Creating directories..."
mkdir -p /volume1/btc_bot/{data,backups,logs,exports}

# Check if Docker is installed
if ! command -v docker &> /dev/null; then
  echo "Error: Docker not found. Please install Docker package from Synology Package Center"
  exit 1
fi

# Copy configuration template on first deploy only
echo "Setting up configuration..."
if [ ! -f "/volume1/btc_bot/.env" ]; then
  cp .env.example /volume1/btc_bot/.env
  echo "Created .env file. Please edit /volume1/btc_bot/.env with your settings"
fi

# Build and start services
echo "Building and starting services..."
cd docker
docker-compose pull
docker-compose build --no-cache
docker-compose up -d

# Wait for database (fixed grace period; health_check.sh verifies later)
echo "Waiting for database to be ready..."
sleep 10

# Check status
echo ""
echo "=== Status ==="
docker-compose ps
echo ""
echo "=== Logs (last 20 lines) ==="
docker-compose logs --tail=20
echo ""
echo "=== Deployment Complete ==="
echo "Database available at: localhost:5432"
echo "API available at: http://localhost:8000"
echo ""
echo "To view logs: docker-compose logs -f"
echo "To stop: docker-compose down"
echo "To backup: ./scripts/backup.sh"

View File

@ -0,0 +1,54 @@
#!/bin/bash
# Fix indicators table schema - Version 2 (Final)
# Handles TimescaleDB compression constraints properly:
# a unique constraint cannot be added to a hypertable while compressed
# chunks exist, so the order below matters — decompress chunks, drop the
# compression policy/setting, deduplicate, add the constraint, then
# re-enable compression and its policy.
echo "Fixing indicators table schema (v2)..."
# 1. Decompress chunks individually (safest method)
# We fetch the list of compressed chunks and process them one by one
echo "Checking for compressed chunks..."
CHUNKS=$(docker exec -i btc_timescale psql -U btc_bot -d btc_data -t -c "SELECT chunk_schema || '.' || chunk_name FROM timescaledb_information.chunks WHERE hypertable_name = 'indicators' AND is_compressed = true;")
# NOTE: relies on word-splitting of $CHUNKS; chunk names contain no spaces
for chunk in $CHUNKS; do
# Trim whitespace
chunk=$(echo "$chunk" | xargs)
if [[ ! -z "$chunk" ]]; then
echo "Decompressing chunk: $chunk"
docker exec -i btc_timescale psql -U btc_bot -d btc_data -c "SELECT decompress_chunk('$chunk');"
fi
done
# 2. Execute the schema changes in a single transaction so a failure
#    leaves the table in its original state
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
BEGIN;
-- Remove policy first
SELECT remove_compression_policy('indicators', if_exists => true);
-- Disable compression setting (REQUIRED to add unique constraint)
ALTER TABLE indicators SET (timescaledb.compress = false);
-- Deduplicate data (just in case duplicates exist)
DELETE FROM indicators a USING indicators b
WHERE a.ctid < b.ctid
AND a.time = b.time
AND a.symbol = b.symbol
AND a.interval = b.interval
AND a.indicator_name = b.indicator_name;
-- Add the unique constraint
ALTER TABLE indicators ADD CONSTRAINT indicators_unique UNIQUE (time, symbol, interval, indicator_name);
-- Re-enable compression configuration
ALTER TABLE indicators SET (
timescaledb.compress,
timescaledb.compress_segmentby = 'symbol,interval,indicator_name'
);
-- Re-add compression policy (7 days)
SELECT add_compression_policy('indicators', INTERVAL '7 days', if_not_exists => true);
COMMIT;
SELECT 'Indicators schema fix v2 completed successfully' as status;
EOF

View File

@ -0,0 +1,65 @@
import asyncio
import logging
import os
import sys
# Add src to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
from src.data_collector.database import DatabaseManager
from src.data_collector.custom_timeframe_generator import CustomTimeframeGenerator
# Configure logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
async def main():
    """Backfill the custom 37m and 148m timeframes from stored candles.

    Connects to the candles database using DB_* environment variables
    (with localhost defaults), then generates 37m candles from 1m data
    and 148m candles from the freshly generated 37m data.
    """
    logger.info("Starting custom timeframe generation...")
    # DB connection settings from env or defaults
    db_host = os.getenv('DB_HOST', 'localhost')
    db_port = int(os.getenv('DB_PORT', 5432))
    db_name = os.getenv('DB_NAME', 'btc_data')
    db_user = os.getenv('DB_USER', 'btc_bot')
    db_password = os.getenv('DB_PASSWORD', '')
    db = DatabaseManager(
        host=db_host,
        port=db_port,
        database=db_name,
        user=db_user,
        password=db_password
    )
    await db.connect()
    try:
        generator = CustomTimeframeGenerator(db)
        await generator.initialize()
        # Generate 37m from 1m
        logger.info("Generating 37m candles from 1m data...")
        count_37m = await generator.generate_historical('37m')
        logger.info(f"Generated {count_37m} candles for 37m")
        # Generate 148m from 37m
        # Note: 148m generation relies on 37m data existing, so order matters
        logger.info("Generating 148m candles from 37m data...")
        count_148m = await generator.generate_historical('148m')
        logger.info(f"Generated {count_148m} candles for 148m")
        logger.info("Done!")
    except Exception as e:
        # Log and print the full traceback; the script still exits 0
        logger.error(f"Error generating custom timeframes: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Close the connection pool on every path
        await db.disconnect()


if __name__ == "__main__":
    asyncio.run(main())

View File

@ -0,0 +1,87 @@
#!/usr/bin/env python3
"""
Generate custom timeframes (37m, 148m) from historical 1m data
Run once to backfill all historical data
"""
import asyncio
import argparse
import logging
import sys
from pathlib import Path
# Add parent to path
sys.path.insert(0, str(Path(__file__).parent.parent / 'src'))
from data_collector.database import DatabaseManager
from data_collector.custom_timeframe_generator import CustomTimeframeGenerator
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
async def main():
    """Parse CLI arguments and generate custom-timeframe candles.

    Returns:
        Process exit code: 0 on success; 1 when no 1m source data exists
        or when generation raised an exception.
    """
    parser = argparse.ArgumentParser(description='Generate custom timeframe candles')
    parser.add_argument('--interval',
                        default='all',
                        help='Which interval to generate (default: all, choices: 3m, 5m, 1h, 37m, etc.)')
    parser.add_argument('--batch-size', type=int, default=5000,
                        help='Number of source candles per batch')
    parser.add_argument('--verify', action='store_true',
                        help='Verify integrity after generation')
    args = parser.parse_args()
    # Initialize database (connection settings come from DatabaseManager defaults)
    db = DatabaseManager()
    await db.connect()
    try:
        generator = CustomTimeframeGenerator(db)
        await generator.initialize()
        # Without 1m source candles there is nothing to aggregate from
        if not generator.first_1m_time:
            logger.error("No 1m data found in database. Cannot generate custom timeframes.")
            return 1
        if args.interval == 'all':
            # Regenerate every known interval, standard and custom alike
            intervals = list(generator.STANDARD_INTERVALS.keys()) + list(generator.CUSTOM_INTERVALS.keys())
        else:
            intervals = [args.interval]
        for interval in intervals:
            logger.info(f"=" * 60)
            logger.info(f"Generating {interval} candles")
            logger.info(f"=" * 60)
            # Generate historical data in batches of --batch-size source candles
            count = await generator.generate_historical(
                interval=interval,
                batch_size=args.batch_size
            )
            logger.info(f"Generated {count} {interval} candles")
            # Verify if requested
            if args.verify:
                logger.info(f"Verifying {interval} integrity...")
                stats = await generator.verify_integrity(interval)
                logger.info(f"Stats: {stats}")
    except Exception as e:
        logger.error(f"Error: {e}", exc_info=True)
        return 1
    finally:
        # Close the pool whether generation succeeded or failed
        await db.disconnect()
    # Only reached on the success path (error paths return 1 above)
    logger.info("Custom timeframe generation complete!")
    return 0


if __name__ == '__main__':
    exit_code = asyncio.run(main())
    sys.exit(exit_code)

31
scripts/health_check.sh Normal file
View File

@ -0,0 +1,31 @@
#!/bin/bash
# Health check script for cron/scheduler.
# Exits 0 when all checks pass, 1 otherwise (container down, DB
# unreachable, or no candle data in the last 5 minutes).

# Check if containers are running
if ! docker ps | grep -q "btc_timescale"; then
  echo "ERROR: TimescaleDB container not running"
  # Send notification (if configured)
  exit 1
fi
if ! docker ps | grep -q "btc_collector"; then
  echo "ERROR: Data collector container not running"
  exit 1
fi

# Check database connectivity (prefer 'if ! cmd' over testing $?)
if ! docker exec btc_timescale pg_isready -U btc_bot -d btc_data > /dev/null 2>&1; then
  echo "ERROR: Cannot connect to database"
  exit 1
fi

# Check if recent data exists.
# FIX: psql -t prints a whitespace-only line when MAX(time) is NULL, so
# the raw output is never empty and the old '-z' test could never fire
# (the script reported OK even with no recent data). Trim with xargs so
# a NULL result becomes a genuinely empty string.
LATEST=$(docker exec btc_timescale psql -U btc_bot -d btc_data -t -c "SELECT MAX(time) FROM candles WHERE time > NOW() - INTERVAL '5 minutes';" 2>/dev/null | xargs)
if [ -z "$LATEST" ]; then
  echo "WARNING: No recent data in database"
  exit 1
fi

echo "OK: All systems operational"
exit 0

11
scripts/run_test.sh Normal file
View File

@ -0,0 +1,11 @@
#!/bin/bash
# Run performance test inside Docker container
# Usage: ./run_test.sh [days] [interval]
# Strict mode so a failed docker exec propagates a non-zero exit status.
set -euo pipefail

# Positional parameters with defaults matching the Python script's
DAYS=${1:-7}
INTERVAL=${2:-1m}

echo "Running MA44 performance test: ${DAYS} days of ${INTERVAL} data"
echo "=================================================="

# Quote the expansions so unusual values cannot word-split or glob
docker exec btc_collector python scripts/test_ma44_performance.py --days "$DAYS" --interval "$INTERVAL"

View File

@ -0,0 +1,187 @@
#!/usr/bin/env python3
"""
Performance Test Script for MA44 Strategy
Tests backtesting performance on Synology DS218+ with 6GB RAM
Usage:
python test_ma44_performance.py [--days DAYS] [--interval INTERVAL]
Example:
python test_ma44_performance.py --days 7 --interval 1m
"""
import asyncio
import argparse
import time
import sys
import os
from datetime import datetime, timedelta, timezone
# Add src to path
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
from data_collector.database import DatabaseManager
from data_collector.indicator_engine import IndicatorEngine, IndicatorConfig
from data_collector.brain import Brain
from data_collector.backtester import Backtester
async def run_performance_test(days: int = 7, interval: str = "1m"):
    """Run an MA44 backtest over recent data and report timing and results.

    Args:
        days: Number of days back from now to backtest.
        interval: Candle interval to run on (e.g. '1m', '15m', '1h').

    Connects to the candles database, verifies data availability, runs the
    Backtester with a single 44-period SMA, then prints the stored results
    of the most recent matching backtest run plus a throughput assessment.
    """
    print("=" * 70)
    print(f"PERFORMANCE TEST: MA44 Strategy")
    print(f"Timeframe: {interval}")
    print(f"Period: Last {days} days")
    print(f"Hardware: Synology DS218+ (6GB RAM)")
    print("=" * 70)
    print()
    # Database connection (adjust these if needed)
    db = DatabaseManager(
        host=os.getenv('DB_HOST', 'localhost'),
        port=int(os.getenv('DB_PORT', 5432)),
        database=os.getenv('DB_NAME', 'btc_data'),
        user=os.getenv('DB_USER', 'btc_bot'),
        password=os.getenv('DB_PASSWORD', '')
    )
    try:
        await db.connect()
        print("✓ Database connected")
        # Calculate date range (UTC, ending now)
        end_date = datetime.now(timezone.utc)
        start_date = end_date - timedelta(days=days)
        print(f"✓ Date range: {start_date.date()} to {end_date.date()}")
        print(f"✓ Symbol: BTC")
        print(f"✓ Strategy: MA44 (44-period SMA)")
        print()
        # Check data availability before spending time on the backtest
        async with db.acquire() as conn:
            count = await conn.fetchval("""
                SELECT COUNT(*) FROM candles
                WHERE symbol = 'BTC'
                AND interval = $1
                AND time >= $2
                AND time <= $3
            """, interval, start_date, end_date)
        print(f"📊 Data points: {count:,} {interval} candles")
        if count == 0:
            print("❌ ERROR: No data found for this period!")
            print(f" Run: python -m data_collector.backfill --days {days} --intervals {interval}")
            return
        # FIX: scale by the interval unit so e.g. '1h' counts as 60 minutes
        # per candle. The old expression stripped the unit suffix and treated
        # every interval as minutes ('1h' was reported as 1 minute/candle).
        unit_minutes = {'m': 1, 'h': 60, 'd': 1440}.get(interval[-1], 1)
        print(f" (Expected: ~{count * int(interval[:-1]) * unit_minutes} minutes of data)")
        print()
        # Setup indicator configuration: a single 44-period SMA named 'ma44'
        indicator_configs = [
            IndicatorConfig("ma44", "sma", 44, [interval])
        ]
        engine = IndicatorEngine(db, indicator_configs)
        brain = Brain(db, engine)
        backtester = Backtester(db, engine, brain)
        print("⚙️ Running backtest...")
        print("-" * 70)
        # Measure execution time of the backtest run only
        start_time = time.time()
        await backtester.run("BTC", [interval], start_date, end_date)
        end_time = time.time()
        execution_time = end_time - start_time
        print("-" * 70)
        print()
        # Fetch results of the most recent ma44 run from the database
        async with db.acquire() as conn:
            latest_backtest = await conn.fetchrow("""
                SELECT id, strategy, start_time, end_time, intervals, results, created_at
                FROM backtest_runs
                WHERE strategy LIKE '%ma44%'
                ORDER BY created_at DESC
                LIMIT 1
            """)
        if latest_backtest and latest_backtest['results']:
            import json
            results = json.loads(latest_backtest['results'])
            print("📈 RESULTS:")
            print("=" * 70)
            print(f" Total Trades: {results.get('total_trades', 'N/A')}")
            print(f" Win Rate: {results.get('win_rate', 0):.1f}%")
            print(f" Win Count: {results.get('win_count', 0)}")
            print(f" Loss Count: {results.get('loss_count', 0)}")
            print(f" Total P&L: ${results.get('total_pnl', 0):.2f}")
            print(f" P&L Percent: {results.get('total_pnl_pct', 0):.2f}%")
            print(f" Initial Balance: ${results.get('initial_balance', 1000):.2f}")
            print(f" Final Balance: ${results.get('final_balance', 1000):.2f}")
            print(f" Max Drawdown: {results.get('max_drawdown', 0):.2f}%")
            print()
            print("⏱️ PERFORMANCE:")
            print(f" Execution Time: {execution_time:.2f} seconds")
            print(f" Candles/Second: {count / execution_time:.0f}")
            print(f" Backtest ID: {latest_backtest['id']}")
            print()
            # Performance assessment thresholds tuned for the DS218+ hardware
            if execution_time < 30:
                print("✅ PERFORMANCE: Excellent (< 30s)")
            elif execution_time < 60:
                print("✅ PERFORMANCE: Good (< 60s)")
            elif execution_time < 300:
                print("⚠️ PERFORMANCE: Acceptable (1-5 min)")
            else:
                print("❌ PERFORMANCE: Slow (> 5 min) - Consider shorter periods or higher TFs")
            print()
            print("💡 RECOMMENDATIONS:")
            if execution_time > 60:
                print(" • For faster results, use higher timeframes (15m, 1h, 4h)")
                print(" • Or reduce date range (< 7 days)")
            else:
                print(" • Hardware is sufficient for this workload")
                print(" • Can handle larger date ranges or multiple timeframes")
        else:
            print("❌ ERROR: No results found in database!")
            print(" The backtest may have failed. Check server logs.")
    except Exception as e:
        print(f"\n❌ ERROR: {e}")
        import traceback
        traceback.print_exc()
    finally:
        # Always close the pool, then print the trailer on every path
        await db.disconnect()
        print()
        print("=" * 70)
        print("Test completed")
        print("=" * 70)
def main():
    """CLI entry point: parse arguments and launch the async performance test."""
    arg_parser = argparse.ArgumentParser(description='Test MA44 backtest performance')
    arg_parser.add_argument('--days', type=int, default=7,
                            help='Number of days to backtest (default: 7)')
    arg_parser.add_argument('--interval', type=str, default='1m',
                            help='Candle interval (default: 1m)')
    opts = arg_parser.parse_args()
    # Drive the coroutine to completion on a fresh event loop
    asyncio.run(run_performance_test(opts.days, opts.interval))


if __name__ == "__main__":
    main()

87
scripts/update_schema.sh Normal file
View File

@ -0,0 +1,87 @@
#!/bin/bash
# Apply schema updates to a running TimescaleDB container without wiping data.
# Idempotent: every statement uses IF NOT EXISTS / OR REPLACE, or swallows
# "already exists" errors in a DO block, so the script is safe to re-run.
echo "Applying schema updates to btc_timescale container..."
# Execute the schema SQL inside the container
# We use psql with the environment variables set in docker-compose
# ($$ is escaped as \$\$ so the shell does not expand it before psql sees it)
docker exec -i btc_timescale psql -U btc_bot -d btc_data <<EOF
-- 1. Unique constraint for indicators (if not exists)
DO \$\$
BEGIN
IF NOT EXISTS (SELECT 1 FROM pg_constraint WHERE conname = 'indicators_unique') THEN
ALTER TABLE indicators ADD CONSTRAINT indicators_unique UNIQUE (time, symbol, interval, indicator_name);
END IF;
END \$\$;
-- 2. Index for indicators
CREATE INDEX IF NOT EXISTS idx_indicators_lookup ON indicators (symbol, interval, indicator_name, time DESC);
-- 3. Data health view update
CREATE OR REPLACE VIEW data_health AS
SELECT
symbol,
COUNT(*) as total_candles,
COUNT(*) FILTER (WHERE validated) as validated_candles,
MAX(time) as latest_candle,
MIN(time) as earliest_candle,
NOW() - MAX(time) as time_since_last
FROM candles
GROUP BY symbol;
-- 4. Decisions table
CREATE TABLE IF NOT EXISTS decisions (
time TIMESTAMPTZ NOT NULL,
symbol TEXT NOT NULL,
interval TEXT NOT NULL,
decision_type TEXT NOT NULL,
strategy TEXT NOT NULL,
confidence DECIMAL(5,4),
price_at_decision DECIMAL(18,8),
indicator_snapshot JSONB NOT NULL,
candle_snapshot JSONB NOT NULL,
reasoning TEXT,
backtest_id TEXT,
executed BOOLEAN DEFAULT FALSE,
execution_price DECIMAL(18,8),
execution_time TIMESTAMPTZ,
created_at TIMESTAMPTZ DEFAULT NOW()
);
-- 5. Decisions hypertable (ignore error if already exists)
DO \$\$
BEGIN
PERFORM create_hypertable('decisions', 'time', chunk_time_interval => INTERVAL '7 days', if_not_exists => TRUE);
EXCEPTION WHEN OTHERS THEN
NULL; -- Ignore if already hypertable
END \$\$;
-- 6. Decisions indexes
CREATE INDEX IF NOT EXISTS idx_decisions_live ON decisions (symbol, interval, time DESC) WHERE backtest_id IS NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_backtest ON decisions (backtest_id, symbol, time DESC) WHERE backtest_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_type ON decisions (symbol, decision_type, time DESC);
-- 7. Backtest runs table
CREATE TABLE IF NOT EXISTS backtest_runs (
id TEXT PRIMARY KEY,
strategy TEXT NOT NULL,
symbol TEXT NOT NULL DEFAULT 'BTC',
start_time TIMESTAMPTZ NOT NULL,
end_time TIMESTAMPTZ NOT NULL,
intervals TEXT[] NOT NULL,
config JSONB,
results JSONB,
created_at TIMESTAMPTZ DEFAULT NOW()
);
-- 8. Compression policies
DO \$\$
BEGIN
ALTER TABLE decisions SET (timescaledb.compress, timescaledb.compress_segmentby = 'symbol,interval,strategy');
PERFORM add_compression_policy('decisions', INTERVAL '7 days', if_not_exists => TRUE);
EXCEPTION WHEN OTHERS THEN
NULL; -- Ignore compression errors if already set
END \$\$;
SELECT 'Schema update completed successfully' as status;
EOF

33
scripts/verify_files.sh Normal file
View File

@ -0,0 +1,33 @@
#!/bin/bash
# BTC Bot Dashboard Setup Script
# Verifies that every dashboard-related file exists in the working tree
# and prints rebuild instructions. Run this from ~/btc_bot.
printf '%s\n' "=== BTC Bot File Verification ==="
printf '\n'

# Files the dashboard build depends on
REQUIRED_FILES=(
  "src/api/server.py"
  "src/api/websocket_manager.py"
  "src/api/dashboard/static/index.html"
  "docker/Dockerfile.api"
  "docker/Dockerfile.collector"
)

for path in "${REQUIRED_FILES[@]}"; do
  if [ -f "$path" ]; then
    # Try BSD stat (-f%z) first, then GNU stat (-c%s); fall back to "unknown"
    bytes=$(stat -f%z "$path" 2>/dev/null || stat -c%s "$path" 2>/dev/null || echo "unknown")
    printf '%s\n' "$path (${bytes} bytes)"
  else
    printf '%s\n' "$path (MISSING)"
  fi
done

printf '\n'
printf '%s\n' "=== Next Steps ==="
printf '%s\n' "1. If all files exist, rebuild:"
printf '%s\n' " cd ~/btc_bot"
printf '%s\n' " docker build --network host --no-cache -f docker/Dockerfile.api -t btc_api ."
printf '%s\n' " cd docker && docker-compose up -d"
printf '\n'
printf '%s\n' "2. Check logs:"
printf '%s\n' " docker logs btc_api --tail 20"