updated fast orders

2025-11-02 19:56:40 +01:00
parent 93363750ae
commit d650bb5fe2
6 changed files with 932 additions and 354 deletions


@@ -10,6 +10,8 @@ import sqlite3
import pandas as pd
from datetime import datetime, timezone
import importlib
# --- REMOVED: import signal ---
# --- REMOVED: from queue import Empty ---
from logging_utils import setup_logging
# --- Using the new high-performance WebSocket utility for live prices ---
@@ -22,7 +24,7 @@ WATCHED_COINS = ["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SU
LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py"
RESAMPLER_SCRIPT = "resampler.py"
MARKET_CAP_FETCHER_SCRIPT = "market_cap_fetcher.py"
TRADE_EXECUTOR_SCRIPT = "trade_executor.py"
# --- REMOVED: trade_executor.py is no longer a script ---
DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py"
STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json")
DB_PATH = os.path.join("_data", "market_data.db")
@@ -46,26 +48,61 @@ def format_market_cap(mc_value):
def run_live_candle_fetcher():
"""Target function to run the live_candle_fetcher.py script in a resilient loop."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
def handle_shutdown_signal(signum, frame):
nonlocal shutdown_requested
# Use print here as logging may not be set up
print(f"[CandleFetcher] Shutdown signal ({signum}) received. Will stop after current run.")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log")
while True:
while not shutdown_requested: # <-- MODIFIED
process = None
try:
with open(log_file, 'a') as f:
# We can't get coins from strategies.json here, so we pass the default list
command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n")
subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
# Use Popen instead of run to be non-blocking
process = subprocess.Popen(command, stdout=f, stderr=subprocess.STDOUT)
# Poll the process and check for shutdown request
while process.poll() is None and not shutdown_requested:
time.sleep(0.5) # Poll every 500ms
if shutdown_requested and process.poll() is None:
print(f"[CandleFetcher] Terminating subprocess {LIVE_CANDLE_FETCHER_SCRIPT}...")
process.terminate() # Terminate the child script
process.wait() # Wait for it to exit
print(f"[CandleFetcher] Subprocess terminated.")
except (subprocess.CalledProcessError, Exception) as e:
if shutdown_requested:
break # Don't restart if we're shutting down
with open(log_file, 'a') as f:
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
f.write(f"Live candle fetcher failed: {e}. Restarting...\n")
time.sleep(5)
if shutdown_requested:
break # Exit outer loop
print("[CandleFetcher] Live candle fetcher shutting down.")
def run_resampler_job(timeframes_to_generate: list):
"""Defines the job for the resampler, redirecting output to a log file."""
log_file = os.path.join(LOGS_DIR, "resampler.log")
try:
# --- MODIFIED: No longer needs to check for empty list, coins are from WATCHED_COINS ---
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "normal"]
with open(log_file, 'a') as f:
f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n")
@@ -78,19 +115,34 @@ def run_resampler_job(timeframes_to_generate: list):
def resampler_scheduler(timeframes_to_generate: list):
"""Schedules the resampler.py script."""
setup_logging('off', 'ResamplerScheduler')
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
if not timeframes_to_generate:
logging.warning("Resampler scheduler started but no timeframes were provided to generate. The process will idle.")
return # Exit the function if there's nothing to do
def handle_shutdown_signal(signum, frame):
nonlocal shutdown_requested
try:
logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
except NameError:
print(f"[ResamplerScheduler] Shutdown signal ({signum}) received. Exiting loop...")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
setup_logging('off', 'ResamplerScheduler')
run_resampler_job(timeframes_to_generate)
# Schedule to run every minute at the :01 second mark
schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate)
logging.info(f"Resampler scheduled to run every minute at :01 for {timeframes_to_generate}.")
while True:
logging.info("Resampler scheduled to run every minute at :01.")
while not shutdown_requested: # <-- MODIFIED
schedule.run_pending()
time.sleep(1) # Check every second to not miss the scheduled time
time.sleep(0.5) # Check every 500ms to not miss the scheduled time and be responsive
logging.info("ResamplerScheduler shutting down.")
def run_market_cap_fetcher_job():
@@ -109,35 +161,128 @@ def run_market_cap_fetcher_job():
def market_cap_fetcher_scheduler():
"""Schedules the market_cap_fetcher.py script to run daily at a specific UTC time."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
def handle_shutdown_signal(signum, frame):
nonlocal shutdown_requested
try:
logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
except NameError:
print(f"[MarketCapScheduler] Shutdown signal ({signum}) received. Exiting loop...")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
setup_logging('off', 'MarketCapScheduler')
schedule.every().day.at("00:15", "UTC").do(run_market_cap_fetcher_job)
while True:
while not shutdown_requested: # <-- MODIFIED
schedule.run_pending()
time.sleep(60)
# Sleep for 60 seconds, but check for shutdown flag every second
for _ in range(60):
if shutdown_requested:
break
time.sleep(1)
logging.info("MarketCapScheduler shutting down.")
def run_trade_executor(trade_signal_queue):
def run_trade_executor(order_execution_queue: multiprocessing.Queue):
"""
Target function to run the trade_executor.py script in a resilient loop.
It passes the shared signal queue to the executor.
Target function to run the TradeExecutor class in a resilient loop.
It now consumes from the order_execution_queue.
"""
log_file = os.path.join(LOGS_DIR, "trade_executor.log")
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# We can just raise KeyboardInterrupt, as it's handled below
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file_path = os.path.join(LOGS_DIR, "trade_executor.log")
try:
sys.stdout = open(log_file_path, 'a', buffering=1)
sys.stderr = sys.stdout
except Exception as e:
print(f"Failed to open log file for TradeExecutor: {e}")
setup_logging('normal', f"TradeExecutor")
logging.info("\n--- Starting Trade Executor process ---")
while True:
try:
with open(log_file, 'a') as f:
f.write(f"\n--- Starting Trade Executor at {datetime.now()} ---\n")
from trade_executor import TradeExecutor
executor = TradeExecutor(log_level="normal", trade_signal_queue=trade_signal_queue)
executor.run() # This will block and run forever
except (subprocess.CalledProcessError, Exception) as e:
with open(log_file, 'a') as f:
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
f.write(f"Trade Executor failed: {e}. Restarting...\n")
from trade_executor import TradeExecutor
executor = TradeExecutor(log_level="normal", order_execution_queue=order_execution_queue)
# --- REVERTED: Call executor.run() directly ---
executor.run()
except KeyboardInterrupt:
logging.info("Trade Executor interrupted. Exiting.")
return
except Exception as e:
logging.error(f"Trade Executor failed: {e}. Restarting...\n", exc_info=True)
time.sleep(10)
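The executor no longer runs as a subprocess; the wrapper imports the TradeExecutor class and runs it in-process, converting SIGTERM into KeyboardInterrupt so one handler covers both Ctrl-C and programmatic termination. A condensed sketch of that wrapper, assuming the constructor signature used in the call above:

    import logging
    import multiprocessing
    import signal
    import time

    def run_worker(order_execution_queue: multiprocessing.Queue):
        """Resilient in-process runner: SIGTERM -> KeyboardInterrupt, other errors -> restart."""
        def _on_sigterm(signum, frame):
            raise KeyboardInterrupt

        signal.signal(signal.SIGTERM, _on_sigterm)

        while True:
            try:
                from trade_executor import TradeExecutor  # import as in the diff above
                executor = TradeExecutor(log_level="normal",
                                         order_execution_queue=order_execution_queue)
                executor.run()                            # blocks until interrupted or it crashes
            except KeyboardInterrupt:
                logging.info("Worker interrupted. Exiting.")
                return
            except Exception as exc:
                logging.error("Worker failed: %s. Restarting in 10s...", exc, exc_info=True)
                time.sleep(10)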
def run_position_manager(trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
"""
Target function to run the PositionManager class in a resilient loop.
Consumes from trade_signal_queue, produces for order_execution_queue.
"""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file_path = os.path.join(LOGS_DIR, "position_manager.log")
try:
sys.stdout = open(log_file_path, 'a', buffering=1)
sys.stderr = sys.stdout
except Exception as e:
print(f"Failed to open log file for PositionManager: {e}")
setup_logging('normal', f"PositionManager")
logging.info("\n--- Starting Position Manager process ---")
while True:
try:
from position_manager import PositionManager
manager = PositionManager(
log_level="normal",
trade_signal_queue=trade_signal_queue,
order_execution_queue=order_execution_queue
)
# --- REVERTED: Call manager.run() directly ---
manager.run()
except KeyboardInterrupt:
logging.info("Position Manager interrupted. Exiting.")
return
except Exception as e:
logging.error(f"Position Manager failed: {e}. Restarting...\n", exc_info=True)
time.sleep(10)
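Order flow is now split into two stages: strategies put raw signals on trade_signal_queue, PositionManager sizes them and forwards concrete orders on order_execution_queue, and TradeExecutor consumes those orders. A stripped-down sketch of that two-queue pipeline; the message shapes and sizing logic are illustrative, not the project's real interfaces:

    import multiprocessing

    def position_manager(signal_q, order_q):
        """Consume raw signals, attach a size, forward concrete orders."""
        while True:
            sig = signal_q.get()                 # e.g. {"coin": "BTC", "signal": "LONG"}
            if sig is None:                      # sentinel -> propagate shutdown
                order_q.put(None)
                return
            order_q.put({**sig, "size": 0.01})   # placeholder sizing logic

    def trade_executor(order_q):
        """Consume concrete orders and 'execute' them."""
        while True:
            order = order_q.get()
            if order is None:
                return
            print("executing", order)            # a real executor would call the exchange API

    if __name__ == "__main__":
        with multiprocessing.Manager() as mgr:
            signal_q, order_q = mgr.Queue(), mgr.Queue()
            pm = multiprocessing.Process(target=position_manager, args=(signal_q, order_q))
            te = multiprocessing.Process(target=trade_executor, args=(order_q,))
            pm.start()
            te.start()
            signal_q.put({"coin": "BTC", "signal": "LONG"})
            signal_q.put(None)                   # stop both stages
            pm.join()
            te.join()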
def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue):
"""
This function BECOMES the strategy runner. It is executed as a separate
@@ -149,9 +294,22 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
import sys
import time
import logging
import signal # <-- ADDED
from logging_utils import setup_logging
from strategies.base_strategy import BaseStrategy
# --- GRACEFUL SHUTDOWN HANDLER ---
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
try:
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
except NameError:
print(f"[Strategy-{strategy_name}] Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
# --- Setup logging to file for this specific process ---
log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
try:
@@ -183,10 +341,12 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
logging.info(f"Starting POLLING logic loop...")
strategy.run_polling_loop() # This is the original blocking call
# --- REVERTED: Added back simple KeyboardInterrupt handler ---
except KeyboardInterrupt:
logging.info("Strategy process stopping.")
logging.info(f"Strategy {strategy_name} process stopping.")
return
except Exception as e:
# --- REVERTED: Removed specific check for KeyboardInterrupt ---
logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True)
logging.info("Restarting strategy in 10 seconds...")
time.sleep(10)
@@ -194,12 +354,30 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
def run_dashboard_data_fetcher():
"""Target function to run the dashboard_data_fetcher.py script."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
try:
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
except NameError:
print(f"[DashboardDataFetcher] Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log")
while True:
try:
with open(log_file, 'a') as f:
f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n")
subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
except KeyboardInterrupt: # --- MODIFIED: Added to catch interrupt ---
logging.info("Dashboard Data Fetcher stopping.")
break
except (subprocess.CalledProcessError, Exception) as e:
with open(log_file, 'a') as f:
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
@@ -320,39 +498,27 @@ class MainApp:
config_params = self.strategy_configs.get(name, {}).get('parameters', {})
# --- NEW ROBUST LOGIC ---
# 1. Get Timeframe (always from config)
timeframe = config_params.get('timeframe', 'N/A')
# 2. Get Coin: Try status file first (live), then config file (static)
# --- FIX: Read coin/size from status file first, fallback to config ---
coin = status.get('coin', config_params.get('coin', 'N/A'))
# 3. Get Size: Try status file first, then config file
size_from_status = status.get('size', None)
size_from_config = config_params.get('size', None)
# --- FIX: Handle nested 'coins_to_copy' logic for size ---
if 'coins_to_copy' in config_params:
size = status.get('size', 'Multi')
else:
size = config_params.get('size', 'N/A')
size = "N/A"
if size_from_status is not None:
size = size_from_status # Use live status from copy_trader
elif size_from_config is not None:
size = size_from_config # Use config from simple strategy
elif 'coins_to_copy' in config_params:
# Special case: copy_trader, but status file is old (no 'size' field)
if coin != 'N/A' and coin != 'Multi':
# Try to find size in config if we know the coin from status
# --- SYNTAX FIX: Removed extra ".get(" ---
size = config_params.get('coins_to_copy', {}).get(coin, {}).get('size', 'Multi')
else:
coin = 'Multi' # It's a copy trader, but we don't know the coin
size = 'Multi'
size_display = f"{size:>8}" if isinstance(size, (int, float)) else f"{str(size):>8}"
# --- END OF NEW LOGIC ---
timeframe = config_params.get('timeframe', 'N/A')
# --- FIX: Removed parameter string logic ---
# --- FIX: Removed 'params_str' from the formatted line ---
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size_display} |")
size_display = f"{size:>8}"
if isinstance(size, (int, float)):
size_display = f"{size:>8.4f}" # Format size to 4 decimal places
# --- END NEW LOGIC ---
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size:>8} |")
right_table_lines.append("-" * right_table_width)
output_lines = []
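The coin/size resolution in this hunk is a fallback chain: the live status file wins, then the flat strategy config, then the nested coins_to_copy map used by copy-trader strategies, with 'Multi'/'N/A' as the last resort. Roughly, as a standalone helper (the dict shapes are inferred from the keys used above):

    def resolve_coin_and_size(status: dict, config_params: dict):
        """Pick display values: live status > flat config > nested coins_to_copy > fallback."""
        coin = status.get('coin', config_params.get('coin', 'N/A'))
        if status.get('size') is not None:
            return coin, status['size']
        if config_params.get('size') is not None:
            return coin, config_params['size']
        if 'coins_to_copy' in config_params:
            if coin not in ('N/A', 'Multi'):
                return coin, config_params['coins_to_copy'].get(coin, {}).get('size', 'Multi')
            return 'Multi', 'Multi'
        return coin, 'N/A'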
@@ -370,35 +536,7 @@ class MainApp:
output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
output_lines.append("-" * pos_table_width)
perps_positions = self.open_positions.get('perpetuals_account', {}).get('open_positions', [])
spot_positions = self.open_positions.get('spot_account', {}).get('positions', [])
if not perps_positions and not spot_positions:
output_lines.append("No open positions found.")
else:
for pos in perps_positions:
try:
pnl = float(pos.get('pnl', 0.0))
pnl_str = f"${pnl:,.2f}"
except (ValueError, TypeError):
pnl_str = "Error"
coin = pos.get('coin') or '-'
size = pos.get('size') or '-'
entry_price = pos.get('entry_price') or '-'
mark_price = pos.get('mark_price') or '-'
leverage = pos.get('leverage') or '-'
output_lines.append(f"{'Perps':<10} | {coin:<6} | {size:>15} | {entry_price:>12} | {mark_price:>12} | {pnl_str:>15} | {leverage:>10} |")
for pos in spot_positions:
pnl = pos.get('pnl', 'N/A')
coin = pos.get('coin') or '-'
balance_size = pos.get('balance_size') or '-'
output_lines.append(f"{'Spot':<10} | {coin:<6} | {balance_size:>15} | {'-':>12} | {'-':>12} | {pnl:>15} | {'-':>10} |")
output_lines.append("-" * pos_table_width)
# --- REMOVED: Background Processes Section ---
# --- REMOVED: Background Processes section ---
final_output = "\n".join(output_lines)
print(final_output)
@@ -422,7 +560,7 @@ if __name__ == "__main__":
os.makedirs(LOGS_DIR)
processes = {}
strategy_configs = {}
# --- REVERTED: Removed process groups ---
try:
with open(STRATEGY_CONFIG_FILE, 'r') as f:
@@ -431,27 +569,37 @@ if __name__ == "__main__":
logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
sys.exit(1)
# --- MODIFIED: Removed dynamic timeframe logic ---
# --- NEW: Hardcoded timeframes for the resampler ---
resampler_timeframes = [
# --- FIX: Hardcoded timeframes ---
required_timeframes = [
"3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h",
"12h", "1d", "3d", "1w", "1M", "148m", "37m"
]
logging.info(f"Using hardcoded timeframes for resampler: {resampler_timeframes}")
# --- END NEW ---
logging.info(f"Using fixed timeframes for resampler: {required_timeframes}")
with multiprocessing.Manager() as manager:
shared_prices = manager.dict()
# --- FIX: Create TWO queues ---
trade_signal_queue = manager.Queue()
order_execution_queue = manager.Queue()
# --- REVERTED: All processes are daemon=True and in one dict ---
processes["Live Market Feed"] = multiprocessing.Process(target=start_live_feed, args=(shared_prices, 'off'), daemon=True)
processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True)
# --- MODIFIED: Pass the new hardcoded list to the resampler process ---
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(resampler_timeframes,), daemon=True)
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(list(required_timeframes),), daemon=True)
processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)
processes["Trade Executor"] = multiprocessing.Process(target=run_trade_executor, args=(trade_signal_queue,), daemon=True)
processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True)
processes["Position Manager"] = multiprocessing.Process(
target=run_position_manager,
args=(trade_signal_queue, order_execution_queue),
daemon=True
)
processes["Trade Executor"] = multiprocessing.Process(
target=run_trade_executor,
args=(order_execution_queue,),
daemon=True
)
for name, config in strategy_configs.items():
if config.get("enabled", False):
@@ -459,7 +607,9 @@ if __name__ == "__main__":
logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.")
continue
proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True)
processes[f"Strategy: {name}"] = proc
processes[f"Strategy: {name}"] = proc # Add to strategy group
# --- REVERTED: Removed combined dict ---
for name, proc in processes.items():
logging.info(f"Starting process '{name}'...")
@@ -471,11 +621,47 @@ if __name__ == "__main__":
try:
app.run()
except KeyboardInterrupt:
# --- MODIFIED: Staged shutdown ---
logging.info("Shutting down...")
for proc in processes.values():
if proc.is_alive(): proc.terminate()
for proc in processes.values():
if proc.is_alive(): proc.join()
strategy_procs = {}
other_procs = {}
for name, proc in processes.items():
if name.startswith("Strategy:"):
strategy_procs[name] = proc
else:
other_procs[name] = proc
# --- 1. Terminate strategy processes ---
logging.info("Shutting down strategy processes first...")
for name, proc in strategy_procs.items():
if proc.is_alive():
logging.info(f"Terminating process: '{name}'...")
proc.terminate()
# --- 2. Wait for 5 seconds ---
logging.info("Waiting 5 seconds for strategies to close...")
time.sleep(5)
# --- 3. Terminate all other processes ---
logging.info("Shutting down remaining core processes...")
for name, proc in other_procs.items():
if proc.is_alive():
logging.info(f"Terminating process: '{name}'...")
proc.terminate()
# --- 4. Join all processes (strategies and others) ---
logging.info("Waiting for all processes to join...")
for name, proc in processes.items(): # Iterate over the original dict to get all
if proc.is_alive():
logging.info(f"Waiting for process '{name}' to join...")
proc.join(timeout=5) # Wait up to 5 seconds
if proc.is_alive():
# If it's still alive, force kill
logging.warning(f"Process '{name}' did not terminate, forcing kill.")
proc.kill()
# --- END MODIFIED ---
logging.info("Shutdown complete.")
sys.exit(0)
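The shutdown path is now staged: strategy processes (the signal producers) are terminated first, the loop waits five seconds so in-flight signals can drain, then the core services are terminated, and finally everything is joined with a kill fallback for stragglers. As a standalone sketch over a name-to-Process dict like the one built above:

    import logging
    import time

    def staged_shutdown(processes: dict):
        strategies = {n: p for n, p in processes.items() if n.startswith("Strategy:")}
        core = {n: p for n, p in processes.items() if not n.startswith("Strategy:")}

        for proc in strategies.values():         # stage 1: stop the signal producers
            if proc.is_alive():
                proc.terminate()
        time.sleep(5)                            # stage 2: let strategies exit cleanly

        for proc in core.values():               # stage 3: stop the core services
            if proc.is_alive():
                proc.terminate()

        for name, proc in processes.items():     # stage 4: join everything, kill stragglers
            proc.join(timeout=5)
            if proc.is_alive():
                logging.warning("Process %s did not terminate, killing.", name)
                proc.kill()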