chore: clean up project to focus only on btc_bot service on bot branch

This commit is contained in:
Gemini CLI
2026-03-05 20:59:52 +01:00
parent 295bd3085c
commit 907eecc4de
70 changed files with 8 additions and 13435 deletions

View File

@ -1,23 +0,0 @@
# Build recipe for the btc_bot API server image.
# python:3.11-slim keeps the image small while staying on CPython 3.11.
FROM python:3.11-slim
WORKDIR /app
# Copy requirements first (for better caching): this layer is only rebuilt
# when requirements.txt changes, so dependency installation stays cached
# across source-code edits.
COPY requirements.txt .
# Install Python dependencies; --no-cache-dir keeps pip's download cache
# out of the image, reducing its size.
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code (done after dependency install to preserve caching)
COPY src/ ./src/
COPY config/ ./config/
COPY scripts/ ./scripts/
# Set Python path so "src.api.server" is importable without installing the
# project as a package.
ENV PYTHONPATH=/app
# Expose API port (documentation only; actual publishing is configured in
# docker-compose / docker run)
EXPOSE 8000
# Run the API server; binding 0.0.0.0 makes it reachable from outside the
# container's network namespace.
CMD ["uvicorn", "src.api.server:app", "--host", "0.0.0.0", "--port", "8000"]

View File

@ -3,10 +3,10 @@ FROM python:3.11-slim
WORKDIR /app
# Copy requirements first
COPY requirements_bot.txt .
COPY requirements.txt .
# Install dependencies
RUN pip install --no-cache-dir -r requirements_bot.txt
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code
COPY src/ ./src/

View File

@ -1,21 +0,0 @@
# Build recipe for the btc_bot data-collector image.
# python:3.11-slim keeps the image small while staying on CPython 3.11.
FROM python:3.11-slim
WORKDIR /app
# Copy requirements first (for better caching): the dependency layer is only
# rebuilt when requirements.txt changes, not on every source edit.
COPY requirements.txt .
# Install Python dependencies
# --no-cache-dir reduces image size
RUN pip install --no-cache-dir -r requirements.txt
# Copy application code (after the install step to preserve layer caching)
COPY src/ ./src/
COPY config/ ./config/
COPY scripts/ ./scripts/
# Set Python path so "src.data_collector.main" resolves without installing
# the project as a package.
ENV PYTHONPATH=/app
# Run the collector as a module (long-running process; no port exposed)
CMD ["python", "-m", "src.data_collector.main"]

View File

@ -1 +0,0 @@
timescale/timescaledb:2.11.2-pg15

View File

@ -1,85 +1,6 @@
# Update docker-compose.yml to mount source code as volume
#
# NOTE(review): the indentation of this file was stripped (flat at column 0),
# which is invalid YAML; the nesting below is reconstructed from the standard
# docker-compose schema.
version: '3.8'

services:
  # TimescaleDB instance holding candles / indicators / decisions.
  timescaledb:
    image: timescale/timescaledb:2.11.2-pg15
    container_name: btc_timescale
    environment:
      POSTGRES_USER: btc_bot
      POSTGRES_PASSWORD: ${DB_PASSWORD}
      POSTGRES_DB: btc_data
      TZ: Europe/Warsaw
    volumes:
      - /volume1/btc_bot/data:/var/lib/postgresql/data
      - /volume1/btc_bot/backups:/backups
      - ./timescaledb.conf:/etc/postgresql/postgresql.conf
      - ./init-scripts:/docker-entrypoint-initdb.d
    ports:
      - "5433:5432"
    command: postgres -c config_file=/etc/postgresql/postgresql.conf
    restart: unless-stopped
    # NOTE(review): "deploy" resource limits are honored by recent Docker
    # Compose versions but ignored by older non-swarm deployments -- confirm
    # the Compose version on the NAS actually enforces these.
    deploy:
      resources:
        limits:
          memory: 1.5G
        reservations:
          memory: 512M
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U btc_bot -d btc_data"]
      interval: 10s
      timeout: 5s
      retries: 5

  # Collector service: pulls candles and writes them into TimescaleDB.
  data_collector:
    build:
      context: ..
      dockerfile: docker/Dockerfile.collector
    image: btc_collector
    container_name: btc_collector
    network_mode: host
    environment:
      - DB_HOST=20.20.20.20
      - DB_PORT=5433
      - DB_NAME=btc_data
      - DB_USER=btc_bot
      - DB_PASSWORD=${DB_PASSWORD}
      - LOG_LEVEL=INFO
    volumes:
      # Source mounted as a volume so code edits don't require a rebuild.
      - ../src:/app/src
      - /volume1/btc_bot/logs:/app/logs
      - ../config:/app/config:ro
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 256M
        reservations:
          memory: 128M

  # HTTP API exposing the collected data.
  api_server:
    build:
      context: ..
      dockerfile: docker/Dockerfile.api
    image: btc_api
    container_name: btc_api
    network_mode: host
    environment:
      - DB_HOST=20.20.20.20
      - DB_PORT=5433
      - DB_NAME=btc_data
      - DB_USER=btc_bot
      - DB_PASSWORD=${DB_PASSWORD}
    volumes:
      - ../src:/app/src
      - /volume1/btc_bot/exports:/app/exports
      - ../config:/app/config:ro
    restart: unless-stopped
    deploy:
      resources:
        limits:
          memory: 512M

  # NOTE(review): the file is truncated in this view -- the ping_pong_bot
  # service definition continues beyond the visible lines.
  ping_pong_bot:
    build:
      context: ..
View File

@ -1,199 +0,0 @@
-- Schema bootstrap for the btc_bot TimescaleDB database.
-- Intended to be idempotent: every statement either uses IF NOT EXISTS /
-- if_not_exists or traps its duplicate error, so the script can be re-run
-- safely against an existing database.

-- 1. Enable TimescaleDB extension
CREATE EXTENSION IF NOT EXISTS timescaledb;

-- 2. Create candles table (main data storage).
--    DECIMAL(18,8) keeps exact price/volume values (never float for money).
--    NOTE(review): the column name "interval" shadows the SQL type keyword;
--    legal in PostgreSQL, but keep it unquoted consistently everywhere.
CREATE TABLE IF NOT EXISTS candles (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,            -- candle timeframe, e.g. '1m', '1h'
    open DECIMAL(18,8) NOT NULL,
    high DECIMAL(18,8) NOT NULL,
    low DECIMAL(18,8) NOT NULL,
    close DECIMAL(18,8) NOT NULL,
    volume DECIMAL(18,8) NOT NULL,
    validated BOOLEAN DEFAULT FALSE,   -- flipped by the validation pass
    source TEXT DEFAULT 'hyperliquid',
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 3. Convert to hypertable (partitioned by time)
SELECT create_hypertable('candles', 'time',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

-- 4. Unique constraint for upserts (required by ON CONFLICT).
--    ALTER TABLE ... ADD CONSTRAINT has no IF NOT EXISTS form, so the
--    duplicate errors are trapped to keep this script re-runnable
--    (consistent with the IF NOT EXISTS style used elsewhere in the file).
DO $$
BEGIN
    ALTER TABLE candles
        ADD CONSTRAINT candles_unique_candle
        UNIQUE (time, symbol, interval);
EXCEPTION
    WHEN duplicate_object OR duplicate_table THEN NULL;
END
$$;

-- 5. Indexes for efficient queries
CREATE INDEX IF NOT EXISTS idx_candles_symbol_time
    ON candles (symbol, interval, time DESC);
-- Partial index: the validator only ever scans unvalidated rows.
CREATE INDEX IF NOT EXISTS idx_candles_validated
    ON candles (validated) WHERE validated = FALSE;

-- 6. Create indicators table (computed values)
CREATE TABLE IF NOT EXISTS indicators (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,
    indicator_name TEXT NOT NULL,
    value DECIMAL(18,8) NOT NULL,
    parameters JSONB,                  -- indicator parameters, e.g. period
    computed_at TIMESTAMPTZ DEFAULT NOW()
);

-- 7. Convert indicators to hypertable
SELECT create_hypertable('indicators', 'time',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

-- 8. Unique constraint + lookup index for indicators (required for upserts);
--    same duplicate-error trap as for candles, to stay idempotent.
DO $$
BEGIN
    ALTER TABLE indicators
        ADD CONSTRAINT indicators_unique
        UNIQUE (time, symbol, interval, indicator_name);
EXCEPTION
    WHEN duplicate_object OR duplicate_table THEN NULL;
END
$$;
CREATE INDEX IF NOT EXISTS idx_indicators_lookup
    ON indicators (symbol, interval, indicator_name, time DESC);

-- 9. Create data quality log table
CREATE TABLE IF NOT EXISTS data_quality (
    time TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    check_type TEXT NOT NULL,
    severity TEXT NOT NULL,
    symbol TEXT,
    details JSONB,
    resolved BOOLEAN DEFAULT FALSE
);
CREATE INDEX IF NOT EXISTS idx_quality_unresolved
    ON data_quality (resolved) WHERE resolved = FALSE;
CREATE INDEX IF NOT EXISTS idx_quality_time
    ON data_quality (time DESC);

-- 10. Create collector state tracking table (one row per symbol,
--     maintained by the trigger defined in step 15)
CREATE TABLE IF NOT EXISTS collector_state (
    id SERIAL PRIMARY KEY,
    symbol TEXT NOT NULL UNIQUE,
    last_candle_time TIMESTAMPTZ,
    last_validation_time TIMESTAMPTZ,
    total_candles BIGINT DEFAULT 0,
    updated_at TIMESTAMPTZ DEFAULT NOW()
);

-- 11. Insert initial state for cbBTC (no-op if the row already exists)
INSERT INTO collector_state (symbol, last_candle_time)
VALUES ('cbBTC', NULL)
ON CONFLICT (symbol) DO NOTHING;

-- 12. Enable compression for old data (applied by the policies in step 13);
--     segmenting by the query-filter columns keeps compressed chunks usable.
ALTER TABLE candles SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'symbol,interval'
);
ALTER TABLE indicators SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'symbol,interval,indicator_name'
);

-- 13. Add compression policies (compress chunks older than 7 days)
SELECT add_compression_policy('candles', INTERVAL '7 days', if_not_exists => TRUE);
SELECT add_compression_policy('indicators', INTERVAL '7 days', if_not_exists => TRUE);

-- 14. Function to keep collector_state in sync with candle inserts.
--     Upserts the per-symbol row: bumps the counter and last_candle_time.
CREATE OR REPLACE FUNCTION update_collector_state()
RETURNS TRIGGER AS $$
BEGIN
    INSERT INTO collector_state (symbol, last_candle_time, total_candles)
    VALUES (NEW.symbol, NEW.time, 1)
    ON CONFLICT (symbol)
    DO UPDATE SET
        last_candle_time = NEW.time,
        total_candles = collector_state.total_candles + 1,
        updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

-- 15. Trigger to auto-update state on every candle insert
DROP TRIGGER IF EXISTS trigger_update_state ON candles;
CREATE TRIGGER trigger_update_state
    AFTER INSERT ON candles
    FOR EACH ROW
    EXECUTE FUNCTION update_collector_state();

-- 16. View for data health checks (per-symbol coverage summary)
CREATE OR REPLACE VIEW data_health AS
SELECT
    symbol,
    COUNT(*) as total_candles,
    COUNT(*) FILTER (WHERE validated) as validated_candles,
    MAX(time) as latest_candle,
    MIN(time) as earliest_candle,
    NOW() - MAX(time) as time_since_last
FROM candles
GROUP BY symbol;

-- 17. Create decisions table (brain outputs - buy/sell/hold with full context)
CREATE TABLE IF NOT EXISTS decisions (
    time TIMESTAMPTZ NOT NULL,
    symbol TEXT NOT NULL,
    interval TEXT NOT NULL,
    decision_type TEXT NOT NULL,
    strategy TEXT NOT NULL,
    confidence DECIMAL(5,4),               -- 0.0000 .. 1.0000
    price_at_decision DECIMAL(18,8),
    indicator_snapshot JSONB NOT NULL,     -- indicators as seen at decision time
    candle_snapshot JSONB NOT NULL,        -- candle data as seen at decision time
    reasoning TEXT,
    backtest_id TEXT,                      -- NULL for live decisions
    executed BOOLEAN DEFAULT FALSE,
    execution_price DECIMAL(18,8),
    execution_time TIMESTAMPTZ,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 18. Convert decisions to hypertable
SELECT create_hypertable('decisions', 'time',
    chunk_time_interval => INTERVAL '7 days',
    if_not_exists => TRUE
);

-- 19. Indexes for decisions - partial indexes separate live (backtest_id IS
--     NULL) from backtest queries so each stays small and selective.
CREATE INDEX IF NOT EXISTS idx_decisions_live
    ON decisions (symbol, interval, time DESC) WHERE backtest_id IS NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_backtest
    ON decisions (backtest_id, symbol, time DESC) WHERE backtest_id IS NOT NULL;
CREATE INDEX IF NOT EXISTS idx_decisions_type
    ON decisions (symbol, decision_type, time DESC);

-- 20. Create backtest_runs metadata table (one row per backtest execution)
CREATE TABLE IF NOT EXISTS backtest_runs (
    id TEXT PRIMARY KEY,
    strategy TEXT NOT NULL,
    symbol TEXT NOT NULL DEFAULT 'BTC',
    start_time TIMESTAMPTZ NOT NULL,
    end_time TIMESTAMPTZ NOT NULL,
    intervals TEXT[] NOT NULL,
    config JSONB,
    results JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- 21. Compression for decisions
ALTER TABLE decisions SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'symbol,interval,strategy'
);
SELECT add_compression_policy('decisions', INTERVAL '7 days', if_not_exists => TRUE);

-- Success message
SELECT 'Database schema initialized successfully' as status;

View File

@ -1,43 +0,0 @@
-- Create a read-only user for API access (optional security).
-- Guarded so the script is safe to re-run: the role is only created when it
-- does not already exist.
DO $$
BEGIN
    IF NOT EXISTS (SELECT FROM pg_roles WHERE rolname = 'btc_api') THEN
        -- SECURITY: placeholder password committed in plain text; it MUST be
        -- changed (ALTER USER btc_api PASSWORD ...) before exposing the
        -- database beyond the local network.
        CREATE USER btc_api WITH PASSWORD 'api_password_change_me';
    END IF;
END
$$;

-- Grant read-only permissions on the existing schema
GRANT CONNECT ON DATABASE btc_data TO btc_api;
GRANT USAGE ON SCHEMA public TO btc_api;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO btc_api;
-- Grant sequence access for ID columns
GRANT USAGE ON ALL SEQUENCES IN SCHEMA public TO btc_api;

-- Apply to future tables AND future sequences.  The original default
-- privileges covered only tables, so sequences created later (e.g. for new
-- SERIAL columns) would have been inaccessible to btc_api.
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO btc_api;
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT USAGE ON SEQUENCES TO btc_api;

-- Continuous aggregate for hourly stats (optional optimization):
-- rolls candles of every interval up into hourly OHLCV buckets.
CREATE MATERIALIZED VIEW IF NOT EXISTS hourly_stats
WITH (timescaledb.continuous) AS
SELECT
    time_bucket('1 hour', time) as bucket,
    symbol,
    interval,
    FIRST(open, time) as first_open,   -- open of the earliest candle in bucket
    MAX(high) as max_high,
    MIN(low) as min_low,
    LAST(close, time) as last_close,   -- close of the latest candle in bucket
    SUM(volume) as total_volume,
    COUNT(*) as candle_count
FROM candles
GROUP BY bucket, symbol, interval;

-- Refresh policy: materialize hourly, lagging one hour behind now so only
-- closed buckets are refreshed; backfill window is one month.
SELECT add_continuous_aggregate_policy('hourly_stats',
    start_offset => INTERVAL '1 month',
    end_offset => INTERVAL '1 hour',
    schedule_interval => INTERVAL '1 hour',
    if_not_exists => TRUE
);

View File

@ -1,41 +0,0 @@
# Optimized for Synology DS218+ (2GB RAM, dual-core CPU)

# Required for TimescaleDB
shared_preload_libraries = 'timescaledb'

# Memory settings
shared_buffers = 256MB
effective_cache_size = 768MB
work_mem = 16MB
maintenance_work_mem = 128MB

# Connection settings
listen_addresses = '*'
max_connections = 50
max_locks_per_transaction = 256

# Worker settings
# max_worker_processes must cover TimescaleDB background workers plus
# parallel workers (timescaledb-tune formula: background + parallel + 3).
# The previous value of 2 was below timescaledb.max_background_workers (4),
# which starves background jobs such as the compression policies.
max_worker_processes = 9
max_parallel_workers_per_gather = 1
max_parallel_workers = 2
max_parallel_maintenance_workers = 1

# Write performance
wal_buffers = 16MB
checkpoint_completion_target = 0.9
# NOTE(review): random_page_cost = 1.1 and effective_io_concurrency = 200
# are SSD-oriented values; confirm the DS218+ volume is SSD-backed --
# on spinning disks these should be ~4 and a single-digit value instead.
random_page_cost = 1.1
effective_io_concurrency = 200

# TimescaleDB settings
timescaledb.max_background_workers = 4

# Logging (use default pg_log directory inside PGDATA)
logging_collector = on
log_directory = 'log'
log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'
log_rotation_age = 1d
log_rotation_size = 100MB
log_min_messages = warning
log_min_error_statement = error

# Auto-vacuum for hypertables
autovacuum_max_workers = 2
autovacuum_naptime = 10s