Initial commit: BTC Bot with dashboard, TA analysis, and 14 timeframes
This commit is contained in:
22
docker/Dockerfile.api
Normal file
22
docker/Dockerfile.api
Normal file
@@ -0,0 +1,22 @@
|
||||
# API server image: serves the FastAPI app (src.api.server:app) via uvicorn
# on port 8000.
FROM python:3.11-slim

WORKDIR /app

# Copy requirements first (for better caching): the dependency layer is
# rebuilt only when requirements.txt changes, not on every code edit.
COPY requirements.txt .

# Install Python dependencies (--no-cache-dir keeps the image small)
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY src/ ./src/
COPY config/ ./config/

# Set Python path so `src.*` modules are importable, and disable stdout
# buffering so log lines reach `docker logs` immediately instead of being
# held in Python's block buffer.
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1

# Expose API port
EXPOSE 8000

# Run the API server
CMD ["uvicorn", "src.api.server:app", "--host", "0.0.0.0", "--port", "8000"]
|
||||
20
docker/Dockerfile.collector
Normal file
20
docker/Dockerfile.collector
Normal file
@@ -0,0 +1,20 @@
|
||||
# Data-collector image: long-running worker that ingests candles into
# TimescaleDB (entry point: src.data_collector.main).
FROM python:3.11-slim

WORKDIR /app

# Copy requirements first (for better caching): the dependency layer is
# rebuilt only when requirements.txt changes, not on every code edit.
COPY requirements.txt .

# Install Python dependencies
# --no-cache-dir reduces image size
RUN pip install --no-cache-dir -r requirements.txt

# Copy application code
COPY src/ ./src/
COPY config/ ./config/

# Set Python path so `src.*` modules are importable, and disable stdout
# buffering so log lines reach `docker logs` immediately instead of being
# held in Python's block buffer.
ENV PYTHONPATH=/app \
    PYTHONUNBUFFERED=1

# Run the collector
CMD ["python", "-m", "src.data_collector.main"]
|
||||
1
docker/Dockerfile.timescaledb
Normal file
1
docker/Dockerfile.timescaledb
Normal file
@@ -0,0 +1 @@
|
||||
# Pin the TimescaleDB base image (PostgreSQL 15 + TimescaleDB 2.11.2).
# NOTE(review): the file previously contained only the bare image reference,
# which is not a valid Dockerfile instruction — every Dockerfile must begin
# with FROM. (docker-compose.yml references the image directly; confirm this
# Dockerfile is actually built anywhere, or delete it.)
FROM timescale/timescaledb:2.11.2-pg15
|
||||
83
docker/docker-compose.yml
Normal file
83
docker/docker-compose.yml
Normal file
@@ -0,0 +1,83 @@
|
||||
# BTC bot stack: TimescaleDB + data collector + API server.
# The `version` key is obsolete in Compose v2 but kept for older engines.
version: '3.8'

services:
  timescaledb:
    image: timescale/timescaledb:2.11.2-pg15
    container_name: btc_timescale
    environment:
      POSTGRES_USER: btc_bot
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_DB: btc_data
      TZ: Europe/Warsaw
    volumes:
      - /volume1/btc_bot/data:/var/lib/postgresql/data
      - /volume1/btc_bot/backups:/backups
      - ./timescaledb.conf:/etc/postgresql/postgresql.conf
      - ./init-scripts:/docker-entrypoint-initdb.d
    ports:
      # Host port 5433 avoids clashing with any Postgres already on 5432.
      - "5433:5432"
    command: postgres -c config_file=/etc/postgresql/postgresql.conf
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 1.5G
        reservations:
          memory: 512M
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U btc_bot -d btc_data"]
      interval: 10s
      timeout: 5s
      retries: 5

  data_collector:
    build:
      context: ..
      dockerfile: docker/Dockerfile.collector
    image: btc_collector
    container_name: btc_collector
    # Host networking: the collector reaches the DB via the published
    # host port (localhost:5433).
    network_mode: host
    environment:
      - DB_HOST=localhost
      - DB_PORT=5433
      - DB_NAME=btc_data
      - DB_USER=btc_bot
      - DB_PASSWORD=${DB_PASSWORD}
      - LOG_LEVEL=INFO
    volumes:
      - /volume1/btc_bot/logs:/app/logs
      - ../config:/app/config:ro
    depends_on:
      timescaledb:
        condition: service_healthy
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 128M

  api_server:
    build:
      context: ..
      dockerfile: docker/Dockerfile.api
    image: btc_api
    container_name: btc_api
    network_mode: host
    environment:
      - DB_HOST=localhost
      - DB_PORT=5433
      - DB_NAME=btc_data
      - DB_USER=btc_bot
      - DB_PASSWORD=${DB_PASSWORD}
    volumes:
      - /volume1/btc_bot/exports:/app/exports
      - ../config:/app/config:ro
    # Wait for the DB healthcheck like the collector does — the short-form
    # `depends_on` only ordered startup and let the API race Postgres init.
    depends_on:
      timescaledb:
        condition: service_healthy
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 512M
|
||||
139
docker/init-scripts/01-schema.sql
Normal file
139
docker/init-scripts/01-schema.sql
Normal file
@@ -0,0 +1,139 @@
|
||||
-- Initial schema for the BTC bot TimescaleDB instance.
-- Runs once from /docker-entrypoint-initdb.d; every statement is written to
-- be safe on re-run (IF NOT EXISTS / ON CONFLICT / exception-guarded).

-- 1. Enable TimescaleDB extension
CREATE EXTENSION IF NOT EXISTS timescaledb;

-- 2. Create candles table (main data storage)
CREATE TABLE IF NOT EXISTS candles (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,
    open DECIMAL(18,8) NOT NULL,
    high DECIMAL(18,8) NOT NULL,
    low DECIMAL(18,8) NOT NULL,
    close DECIMAL(18,8) NOT NULL,
    volume DECIMAL(18,8) NOT NULL,
    validated BOOLEAN DEFAULT FALSE,
    source TEXT DEFAULT 'hyperliquid',
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 3. Convert to hypertable (partitioned by time)
SELECT create_hypertable('candles', 'time',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

-- 4. Create unique constraint for upserts (required by ON CONFLICT).
-- ADD CONSTRAINT has no IF NOT EXISTS form, so guard it in a DO block to
-- keep the script re-runnable like the rest of the file.
DO $$
BEGIN
    ALTER TABLE candles
    ADD CONSTRAINT candles_unique_candle
    UNIQUE (time, symbol, interval);
EXCEPTION
    WHEN duplicate_object THEN NULL;  -- constraint already exists
    WHEN duplicate_table THEN NULL;   -- backing index already exists
END $$;

-- 5. Create indexes for efficient queries
CREATE INDEX IF NOT EXISTS idx_candles_symbol_time
    ON candles (symbol, interval, time DESC);

-- Partial index: only unvalidated rows, so validation sweeps stay cheap.
CREATE INDEX IF NOT EXISTS idx_candles_validated
    ON candles (validated) WHERE validated = FALSE;

-- 6. Create indicators table (computed values)
CREATE TABLE IF NOT EXISTS indicators (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,
    indicator_name TEXT NOT NULL,
    value DECIMAL(18,8) NOT NULL,
    parameters JSONB,
    computed_at TIMESTAMPTZ DEFAULT NOW()
);

-- 7. Convert indicators to hypertable
SELECT create_hypertable('indicators', 'time',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

-- 8. Create index for indicators
CREATE INDEX IF NOT EXISTS idx_indicators_lookup
    ON indicators (symbol, interval, indicator_name, time DESC);

-- 9. Create data quality log table
CREATE TABLE IF NOT EXISTS data_quality (
    time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    check_type TEXT NOT NULL,
    severity TEXT NOT NULL,
    symbol TEXT,
    details JSONB,
    resolved BOOLEAN DEFAULT FALSE
);

CREATE INDEX IF NOT EXISTS idx_quality_unresolved
    ON data_quality (resolved) WHERE resolved = FALSE;

CREATE INDEX IF NOT EXISTS idx_quality_time
    ON data_quality (time DESC);

-- 10. Create collector state tracking table
CREATE TABLE IF NOT EXISTS collector_state (
    id SERIAL PRIMARY KEY,
    symbol TEXT NOT NULL UNIQUE,
    last_candle_time TIMESTAMPTZ,
    last_validation_time TIMESTAMPTZ,
    total_candles BIGINT DEFAULT 0,
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- 11. Insert initial state for cbBTC
INSERT INTO collector_state (symbol, last_candle_time)
VALUES ('cbBTC', NULL)
ON CONFLICT (symbol) DO NOTHING;

-- 12. Enable compression for old data (after 7 days)
ALTER TABLE candles SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'symbol,interval'
);

ALTER TABLE indicators SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'symbol,interval,indicator_name'
);

-- 13. Add compression policies (background jobs; require enough
-- max_worker_processes in postgresql.conf to actually run).
SELECT add_compression_policy('candles', INTERVAL '7 days', if_not_exists => TRUE);
SELECT add_compression_policy('indicators', INTERVAL '7 days', if_not_exists => TRUE);

-- 14. Create function to update collector state.
-- NOTE(review): fires once per inserted row, so large bulk inserts pay one
-- upsert on collector_state per candle — acceptable at current volume, but
-- consider a statement-level trigger if backfills grow.
CREATE OR REPLACE FUNCTION update_collector_state()
RETURNS TRIGGER AS $$
BEGIN
    INSERT INTO collector_state (symbol, last_candle_time, total_candles)
    VALUES (NEW.symbol, NEW.time, 1)
    ON CONFLICT (symbol)
    DO UPDATE SET
        last_candle_time = NEW.time,
        total_candles = collector_state.total_candles + 1,
        updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- 15. Create trigger to auto-update state
DROP TRIGGER IF EXISTS trigger_update_state ON candles;
CREATE TRIGGER trigger_update_state
    AFTER INSERT ON candles
    FOR EACH ROW
    EXECUTE FUNCTION update_collector_state();

-- 16. Create view for data health check
CREATE OR REPLACE VIEW data_health AS
SELECT
    symbol,
    COUNT(*) as total_candles,
    COUNT(*) FILTER (WHERE validated) as validated_candles,
    MAX(time) as latest_candle,
    MIN(time) as earliest_candle,
    NOW() - MAX(time) as time_since_last
FROM candles
GROUP BY symbol;

-- Success message
SELECT 'Database schema initialized successfully' as status;
|
||||
43
docker/init-scripts/02-optimization.sql
Normal file
43
docker/init-scripts/02-optimization.sql
Normal file
@@ -0,0 +1,43 @@
|
||||
-- Create a read-only user for API access (optional security).
-- SECURITY(review): the password is hardcoded in an init script committed to
-- VCS — rotate it after first boot (ALTER USER btc_api WITH PASSWORD ...) or
-- inject it via an environment-substituted secret instead.
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'btc_api') THEN
        CREATE USER btc_api WITH PASSWORD 'api_password_change_me';
    END IF;
END
$$;

-- Grant read-only permissions
GRANT CONNECT ON DATABASE btc_data TO btc_api;
GRANT USAGE ON SCHEMA public TO btc_api;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO btc_api;

-- Grant sequence access for ID columns
GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO btc_api;

-- Apply to future tables
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO btc_api;

-- Create continuous aggregate for hourly stats (optional optimization)
CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_stats
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) as bucket,
    symbol,
    interval,
    FIRST(open, time) as first_open,
    MAX(high) as max_high,
    MIN(low) as min_low,
    LAST(close, time) as last_close,
    SUM(volume) as total_volume,
    COUNT(*) as candle_count
FROM candles
GROUP BY bucket, symbol, interval;

-- hourly_stats is created AFTER the blanket GRANT above; grant explicitly so
-- API read access never depends on default-privilege ordering.
GRANT SELECT ON hourly_stats TO btc_api;

-- Add refresh policy for continuous aggregate
SELECT add_continuous_aggregate_policy('hourly_stats',
    start_offset => INTERVAL '1 month',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour',
    if_not_exists => TRUE
);
|
||||
40
docker/timescaledb.conf
Normal file
40
docker/timescaledb.conf
Normal file
@@ -0,0 +1,40 @@
|
||||
# Optimized for Synology DS218+ (2GB RAM, dual-core CPU)

# Required for TimescaleDB
shared_preload_libraries = 'timescaledb'

# Memory settings
shared_buffers = 256MB
effective_cache_size = 768MB
work_mem = 16MB
maintenance_work_mem = 128MB

# Connection settings
listen_addresses = '*'
max_connections = 50

# Worker processes. Must satisfy:
#   max_worker_processes >= timescaledb.max_background_workers
#                           + max_parallel_workers + 1
# The previous value (2) left no slots for TimescaleDB's 4 background
# workers, so the compression policies scheduled in 01-schema.sql would
# never run. 4 (background) + 2 (parallel) + 1 (spare) = 7.
max_worker_processes = 7
max_parallel_workers_per_gather = 1
max_parallel_workers = 2
max_parallel_maintenance_workers = 1

# Write performance
wal_buffers = 16MB
checkpoint_completion_target = 0.9
random_page_cost = 1.1
# NOTE(review): 200 is an SSD-class setting; if this DS218+ runs spinning
# disks, PostgreSQL's HDD default of 2 is more appropriate — confirm the
# storage type before tuning further.
effective_io_concurrency = 200

# TimescaleDB settings
timescaledb.max_background_workers = 4

# Logging (use default pg_log directory inside PGDATA)
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_min_messages = warning
log_min_error_statement = error

# Auto-vacuum for hypertables
autovacuum_max_workers = 2
autovacuum_naptime = 10s
|
||||
Reference in New Issue
Block a user