trade_executor, agent creator

This commit is contained in:
2025-10-16 13:18:39 +02:00
parent 0d53200882
commit 25df8b8ba9
15 changed files with 541 additions and 133 deletions

View File

@ -5,7 +5,8 @@ import sys
import sqlite3
import pandas as pd
import json
from datetime import datetime, timezone
from datetime import datetime, timezone, timedelta
import time
# Assuming logging_utils.py is in the same directory
from logging_utils import setup_logging
@ -14,6 +15,7 @@ class Resampler:
"""
Reads 1-minute candle data directly from the SQLite database, resamples
it to various timeframes, and stores the results back in the database.
This script is designed to run continuously as a self-scheduling service.
"""
def __init__(self, log_level: str, coins: list, timeframes: dict):
@ -30,27 +32,19 @@ class Resampler:
'volume': 'sum',
'number_of_trades': 'sum'
}
self.resampling_status = self._load_existing_status()
self.resampling_status = {}
def _load_existing_status(self) -> dict:
"""Loads the existing status file if it exists, otherwise returns an empty dict."""
if os.path.exists(self.status_file_path):
try:
with open(self.status_file_path, 'r', encoding='utf-8') as f:
logging.info(f"Loading existing status from '{self.status_file_path}'")
return json.load(f)
except (IOError, json.JSONDecodeError) as e:
logging.warning(f"Could not read existing status file. Starting fresh. Error: {e}")
return {}
def run(self):
def _execute_resampling_job(self):
"""
Main execution function to process all configured coins and update the database.
"""
if not os.path.exists(self.db_path):
logging.error(f"Database file '{self.db_path}' not found. "
"Please run the data fetcher script first.")
sys.exit(1)
return # Don't exit, just wait for the next cycle
# Load the latest status file at the start of each job
self.resampling_status = self._load_existing_status()
with sqlite3.connect(self.db_path) as conn:
conn.execute("PRAGMA journal_mode=WAL;")
@ -109,7 +103,40 @@ class Resampler:
logging.error(f"Failed to process coin '{coin}': {e}")
self._save_status()
logging.info("--- Resampling process complete ---")
logging.info("--- Resampling job complete ---")
def run_periodically(self):
    """Run the resampling job at every 5-minute mark of the hour (00, 05, 10...).

    Infinite scheduling loop: sleeps until 5 seconds past the next
    5-minute boundary (the buffer gives the 1-minute candle data time
    to land), runs one resampling job, then repeats. Never returns;
    stop the process externally (e.g. KeyboardInterrupt).
    """
    logging.info("Resampler started. Waiting for the first scheduled run...")
    while True:
        current = datetime.now(timezone.utc)
        # Seconds elapsed since the most recent 5-minute boundary,
        # including the sub-second fraction for an accurate sleep.
        elapsed = (
            (current.minute % 5) * 60
            + current.second
            + current.microsecond / 1_000_000
        )
        # Remainder of the 300-second interval, plus a 5-second buffer
        # to ensure the candle data is ready before resampling.
        wait_for = (300 - elapsed) + 5
        logging.info(f"Next resampling run in {wait_for:.2f} seconds.")
        time.sleep(wait_for)
        logging.info("Scheduled time reached. Starting resampling job...")
        self._execute_resampling_job()
def _load_existing_status(self) -> dict:
    """Load the persisted resampling status from ``self.status_file_path``.

    Returns:
        dict: the parsed JSON status, or an empty dict when the file is
        missing (normal on first run, no warning logged) or unreadable /
        corrupt (a warning is logged and processing starts fresh).
    """
    try:
        # EAFP: open directly rather than pre-checking with
        # os.path.exists(), which is racy — the file could be removed
        # between the check and the open.
        with open(self.status_file_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except FileNotFoundError:
        # First run / no status persisted yet — not an error.
        return {}
    except (IOError, json.JSONDecodeError) as e:
        logging.warning(f"Could not read existing status file. Starting fresh. Error: {e}")
        return {}
def _save_status(self):
"""Saves the final resampling status to a JSON file."""
@ -138,7 +165,6 @@ def parse_timeframes(tf_strings: list) -> dict:
if unit == 'm':
code = f"{numeric_part}min"
elif unit == 'w':
# --- FIX: Use uppercase 'W' for weeks to avoid deprecation warning ---
code = f"{numeric_part}W"
elif unit in ['h', 'd']:
code = f"{numeric_part}{unit}"
@ -151,39 +177,30 @@ def parse_timeframes(tf_strings: list) -> dict:
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Resample 1-minute candle data from SQLite to other timeframes.")
parser.add_argument(
"--coins",
nargs='+',
default=["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"],
help="List of coins to process."
)
parser.add_argument(
"--timeframes",
nargs='+',
default=['4m', '5m', '15m', '30m', '37m', '148m', '4h', '12h', '1d', '1w'],
help="List of timeframes to generate (e.g., 5m 1h 1d)."
)
parser.add_argument(
"--timeframe",
dest="timeframes",
nargs='+',
help=argparse.SUPPRESS
)
parser.add_argument(
"--log-level",
default="normal",
choices=['off', 'normal', 'debug'],
help="Set the logging level for the script."
)
args = parser.parse_args()
# The script now runs as a long-running service, loading its config from a file.
CONFIG_FILE = "resampler_conf.json"
try:
with open(CONFIG_FILE, 'r') as f:
config = json.load(f)
coins = config.get("coins", [])
timeframes_list = config.get("timeframes", [])
except (FileNotFoundError, json.JSONDecodeError) as e:
print(f"FATAL: Could not load '{CONFIG_FILE}'. Please ensure it exists and is valid. Error: {e}")
sys.exit(1)
# Use a basic log level until the class is initialized
setup_logging('normal', 'Resampler')
timeframes_dict = parse_timeframes(args.timeframes)
timeframes_dict = parse_timeframes(timeframes_list)
resampler = Resampler(
log_level=args.log_level,
coins=args.coins,
log_level='normal',
coins=coins,
timeframes=timeframes_dict
)
resampler.run()
try:
resampler.run_periodically()
except KeyboardInterrupt:
logging.info("Resampler process stopped.")