import json
import logging
import os
import sys
import time
import subprocess
import multiprocessing
import schedule
import sqlite3
import pandas as pd
from datetime import datetime, timezone
import importlib

from logging_utils import setup_logging
# --- Using the new high-performance WebSocket utility for live prices ---
from live_market_utils import start_live_feed
# --- Import the base class for type hinting (optional but good practice) ---
from strategies.base_strategy import BaseStrategy

# --- Configuration ---
WATCHED_COINS = ["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"]
LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py"
RESAMPLER_SCRIPT = "resampler.py"
MARKET_CAP_FETCHER_SCRIPT = "market_cap_fetcher.py"
TRADE_EXECUTOR_SCRIPT = "trade_executor.py"
DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py"
STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json")
DB_PATH = os.path.join("_data", "market_data.db")
MARKET_CAP_SUMMARY_FILE = os.path.join("_data", "market_cap_data.json")
LOGS_DIR = "_logs"
TRADE_EXECUTOR_STATUS_FILE = os.path.join(LOGS_DIR, "trade_executor_status.json")

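# NOTE (illustrative sketch, inferred from how the config is consumed in this
# file; the names below are placeholders and the real file may carry extra
# fields): each entry in _data/strategies.json is expected to look roughly like
#
#   "ema_cross_btc": {
#       "enabled": true,
#       "class": "strategies.ema_cross.EmaCrossStrategy",
#       "is_event_driven": false,
#       "parameters": {"coin": "BTC", "timeframe": "1h", "size": 0.01}
#   }
#
# Only 'enabled', 'class', 'is_event_driven' and 'parameters' (with optional
# 'coin', 'timeframe', 'size' and 'coins_to_copy') are read in this script.
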
def format_market_cap(mc_value):
    """Formats a large number into a human-readable market cap string."""
    if not isinstance(mc_value, (int, float)) or mc_value == 0:
        return "N/A"
    if mc_value >= 1_000_000_000_000:
        return f"${mc_value / 1_000_000_000_000:.2f}T"
    if mc_value >= 1_000_000_000:
        return f"${mc_value / 1_000_000_000:.2f}B"
    if mc_value >= 1_000_000:
        return f"${mc_value / 1_000_000:.2f}M"
    return f"${mc_value:,.2f}"

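# Examples of the formatting above: format_market_cap(2_500_000_000_000) -> "$2.50T",
# format_market_cap(1_234_000_000) -> "$1.23B", format_market_cap(0) -> "N/A".
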
def run_live_candle_fetcher():
    """Target function to run the live_candle_fetcher.py script in a resilient loop."""
    log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log")
    while True:
        try:
            with open(log_file, 'a') as f:
                # We can't get coins from strategies.json here, so we pass the default list
                command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
                f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n")
                subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
        except (subprocess.CalledProcessError, Exception) as e:
            with open(log_file, 'a') as f:
                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
                f.write(f"Live candle fetcher failed: {e}. Restarting...\n")
            time.sleep(5)

def run_resampler_job(timeframes_to_generate: list):
    """Defines the job for the resampler, redirecting output to a log file."""
    log_file = os.path.join(LOGS_DIR, "resampler.log")
    try:
        # --- MODIFIED: No longer needs to check for an empty list; coins come from WATCHED_COINS ---
        command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "normal"]
        with open(log_file, 'a') as f:
            f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n")
            subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
    except Exception as e:
        with open(log_file, 'a') as f:
            f.write(f"\n--- SCHEDULER ERROR at {datetime.now()} ---\n")
            f.write(f"Failed to run resampler.py job: {e}\n")

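# For reference, the command assembled in run_resampler_job expands to something
# like (illustrative, arguments elided):
#   python resampler.py --coins BTC ETH ... SUI --timeframes 3m 5m ... --log-level normal
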
def resampler_scheduler(timeframes_to_generate: list):
    """Schedules the resampler.py script."""
    setup_logging('off', 'ResamplerScheduler')

    if not timeframes_to_generate:
        logging.warning("Resampler scheduler started but no timeframes were provided to generate. Exiting.")
        return  # Exit the function if there's nothing to do

    run_resampler_job(timeframes_to_generate)
    # Schedule to run every minute at the :01 second mark
    schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate)
    logging.info(f"Resampler scheduled to run every minute at :01 for {timeframes_to_generate}.")
    while True:
        schedule.run_pending()
        time.sleep(1)  # Check every second to not miss the scheduled time

def run_market_cap_fetcher_job():
    """Defines the job for the market cap fetcher, redirecting output."""
    log_file = os.path.join(LOGS_DIR, "market_cap_fetcher.log")
    try:
        command = [sys.executable, MARKET_CAP_FETCHER_SCRIPT, "--log-level", "off"]
        with open(log_file, 'a') as f:
            f.write(f"\n--- Starting {MARKET_CAP_FETCHER_SCRIPT} job at {datetime.now()} ---\n")
            subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
    except Exception as e:
        with open(log_file, 'a') as f:
            f.write(f"\n--- SCHEDULER ERROR at {datetime.now()} ---\n")
            f.write(f"Failed to run {MARKET_CAP_FETCHER_SCRIPT} job: {e}\n")

def market_cap_fetcher_scheduler():
    """Schedules the market_cap_fetcher.py script to run daily at a specific UTC time."""
    setup_logging('off', 'MarketCapScheduler')
    schedule.every().day.at("00:15", "UTC").do(run_market_cap_fetcher_job)
    while True:
        schedule.run_pending()
        time.sleep(60)

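# NOTE: the two-argument form .at("00:15", "UTC") relies on the schedule
# library's timezone support, which (an assumption about the deployed
# environment) needs a recent schedule release plus the pytz package; older
# releases accept only the single time-string argument.
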
def run_trade_executor(trade_signal_queue):
    """
    Target function to run the TradeExecutor in a resilient loop.
    It passes the shared signal queue to the executor.
    """
    log_file = os.path.join(LOGS_DIR, "trade_executor.log")
    while True:
        try:
            with open(log_file, 'a') as f:
                f.write(f"\n--- Starting Trade Executor at {datetime.now()} ---\n")

            from trade_executor import TradeExecutor

            executor = TradeExecutor(log_level="normal", trade_signal_queue=trade_signal_queue)
            executor.run()  # This will block and run forever

        except Exception as e:
            with open(log_file, 'a') as f:
                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
                f.write(f"Trade Executor failed: {e}. Restarting...\n")
            time.sleep(10)

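# NOTE (interface assumed by run_strategy below, inferred from how the strategy
# object is used; see strategies/base_strategy.py for the authoritative
# definition): a strategy class is constructed as
#   StrategyClass(strategy_name, parameters_dict, trade_signal_queue)
# and must expose a blocking run_event_loop() or run_polling_loop() method,
# selected by the "is_event_driven" flag in its config entry.
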
def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue):
    """
    This function BECOMES the strategy runner. It is executed as a separate
    process and pushes signals to the shared queue.
    """
    # These imports only happen in the new, lightweight process
    import importlib
    import os
    import sys
    import time
    import logging
    from logging_utils import setup_logging
    from strategies.base_strategy import BaseStrategy

    # --- Setup logging to file for this specific process ---
    log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
    try:
        sys.stdout = open(log_file_path, 'a', buffering=1)  # 1 = line buffering
        sys.stderr = sys.stdout
    except Exception as e:
        print(f"Failed to open log file for {strategy_name}: {e}")

    setup_logging('normal', f"Strategy-{strategy_name}")

    while True:
        try:
            logging.info(f"--- Starting strategy '{strategy_name}' ---")

            if 'class' not in config:
                logging.error(f"Strategy config for '{strategy_name}' is missing the 'class' key. Exiting.")
                return

            module_path, class_name = config['class'].rsplit('.', 1)
            module = importlib.import_module(module_path)
            StrategyClass = getattr(module, class_name)

            strategy = StrategyClass(strategy_name, config['parameters'], trade_signal_queue)

            if config.get("is_event_driven", False):
                logging.info("Starting EVENT-DRIVEN logic loop...")
                strategy.run_event_loop()  # This is a blocking call
            else:
                logging.info("Starting POLLING logic loop...")
                strategy.run_polling_loop()  # This is the original blocking call

        except KeyboardInterrupt:
            logging.info("Strategy process stopping.")
            return
        except Exception as e:
            logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True)
            logging.info("Restarting strategy in 10 seconds...")
            time.sleep(10)

def run_dashboard_data_fetcher():
    """Target function to run the dashboard_data_fetcher.py script."""
    log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log")
    while True:
        try:
            with open(log_file, 'a') as f:
                f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n")
                subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
        except (subprocess.CalledProcessError, Exception) as e:
            with open(log_file, 'a') as f:
                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
                f.write(f"Dashboard Data Fetcher failed: {e}. Restarting...\n")
            time.sleep(10)

class MainApp:
    def __init__(self, coins_to_watch: list, processes: dict, strategy_configs: dict, shared_prices: dict):
        self.watched_coins = coins_to_watch
        self.shared_prices = shared_prices
        self.prices = {}
        self.market_caps = {}
        self.open_positions = {}
        self.background_processes = processes
        self.process_status = {}
        self.strategy_configs = strategy_configs
        self.strategy_statuses = {}

    def read_prices(self):
        """Reads the latest prices directly from the shared memory dictionary."""
        try:
            self.prices = dict(self.shared_prices)
        except Exception as e:
            logging.debug(f"Could not read from shared prices dict: {e}")

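    # Expected shape of the market cap summary file (inferred from the lookup
    # below; illustrative only): {"BTC_market_cap": {"market_cap": 1.9e12, ...}, ...}
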
    def read_market_caps(self):
        """Reads the latest market cap summary from its JSON file."""
        if os.path.exists(MARKET_CAP_SUMMARY_FILE):
            try:
                with open(MARKET_CAP_SUMMARY_FILE, 'r', encoding='utf-8') as f:
                    summary_data = json.load(f)

                for coin in self.watched_coins:
                    table_key = f"{coin}_market_cap"
                    if table_key in summary_data:
                        self.market_caps[coin] = summary_data[table_key].get('market_cap')
            except (json.JSONDecodeError, IOError):
                logging.debug("Could not read market cap summary file.")

    def read_strategy_statuses(self):
        """Reads the status JSON file for each enabled strategy."""
        enabled_statuses = {}
        for name, config in self.strategy_configs.items():
            if config.get("enabled", False):
                status_file = os.path.join("_data", f"strategy_status_{name}.json")
                if os.path.exists(status_file):
                    try:
                        with open(status_file, 'r', encoding='utf-8') as f:
                            enabled_statuses[name] = json.load(f)
                    except (IOError, json.JSONDecodeError):
                        enabled_statuses[name] = {"error": "Could not read status file."}
                else:
                    enabled_statuses[name] = {"current_signal": "Initializing..."}
        self.strategy_statuses = enabled_statuses

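    # Expected shape of the trade executor status file (inferred from how it is
    # rendered in display_dashboard; illustrative only):
    #   {"perpetuals_account": {"open_positions": [{"coin": ..., "size": ...,
    #     "entry_price": ..., "mark_price": ..., "pnl": ..., "leverage": ...}]},
    #    "spot_account": {"positions": [{"coin": ..., "balance_size": ..., "pnl": ...}]}}
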
    def read_executor_status(self):
        """Reads the live status file from the trade executor."""
        if os.path.exists(TRADE_EXECUTOR_STATUS_FILE):
            try:
                with open(TRADE_EXECUTOR_STATUS_FILE, 'r', encoding='utf-8') as f:
                    self.open_positions = json.load(f)
            except (IOError, json.JSONDecodeError):
                logging.debug("Could not read trade executor status file.")
        else:
            self.open_positions = {}

    def check_process_status(self):
        """Checks if the background processes are still running."""
        for name, process in self.background_processes.items():
            self.process_status[name] = "Running" if process.is_alive() else "STOPPED"

    def display_dashboard(self):
        """Displays a formatted dashboard with side-by-side tables."""
        print("\x1b[H\x1b[J", end="")  # Clear screen

        left_table_lines = ["--- Market Dashboard ---"]
        left_table_width = 44
        left_table_lines.append("-" * left_table_width)
        left_table_lines.append(f"{'#':<2} | {'Coin':^6} | {'Live Price':>10} | {'Market Cap':>15} |")
        left_table_lines.append("-" * left_table_width)
        for i, coin in enumerate(self.watched_coins, 1):
            price_str = self.prices.get(coin, "Loading...")
            # Format the price string
            try:
                price_float = float(price_str)
                if price_float < 1:
                    price_str = f"{price_float:>10.6f}"
                elif price_float < 100:
                    price_str = f"{price_float:>10.4f}"
                else:
                    price_str = f"{price_float:>10.2f}"
            except (ValueError, TypeError):
                price_str = f"{'Loading...':>10}"

            market_cap = self.market_caps.get(coin)
            formatted_mc = format_market_cap(market_cap)
            left_table_lines.append(f"{i:<2} | {coin:^6} | {price_str} | {formatted_mc:>15} |")
        left_table_lines.append("-" * left_table_width)

        right_table_lines = ["--- Strategy Status ---"]
        # --- FIX: Adjusted table width after removing parameters ---
        right_table_width = 105
        right_table_lines.append("-" * right_table_width)
        # --- FIX: Removed 'Parameters' from header ---
        right_table_lines.append(f"{'#':^2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':^8} | {'Signal Price':>12} | {'Last Change':>17} | {'TF':^5} | {'Size':^8} |")
        right_table_lines.append("-" * right_table_width)
        for i, (name, status) in enumerate(self.strategy_statuses.items(), 1):
            signal = status.get('current_signal', 'N/A')
            price = status.get('signal_price')
            price_display = f"{price:.4f}" if isinstance(price, (int, float)) else "-"
            last_change = status.get('last_signal_change_utc')
            last_change_display = 'Never'
            if last_change:
                dt_utc = datetime.fromisoformat(last_change.replace('Z', '+00:00')).replace(tzinfo=timezone.utc)
                dt_local = dt_utc.astimezone(None)
                last_change_display = dt_local.strftime('%Y-%m-%d %H:%M')

            config_params = self.strategy_configs.get(name, {}).get('parameters', {})

            # --- NEW ROBUST LOGIC ---
            # 1. Get Timeframe (always from config)
            timeframe = config_params.get('timeframe', 'N/A')

            # 2. Get Coin: Try status file first (live), then config file (static)
            coin = status.get('coin', config_params.get('coin', 'N/A'))

            # 3. Get Size: Try status file first, then config file
            size_from_status = status.get('size', None)
            size_from_config = config_params.get('size', None)

            size = "N/A"
            if size_from_status is not None:
                size = size_from_status  # Use live status from copy_trader
            elif size_from_config is not None:
                size = size_from_config  # Use config from simple strategy
            elif 'coins_to_copy' in config_params:
                # Special case: copy_trader, but status file is old (no 'size' field)
                if coin != 'N/A' and coin != 'Multi':
                    # Try to find size in config if we know the coin from status
                    # --- SYNTAX FIX: Removed extra ".get(" ---
                    size = config_params.get('coins_to_copy', {}).get(coin, {}).get('size', 'Multi')
                else:
                    coin = 'Multi'  # It's a copy trader, but we don't know the coin
                    size = 'Multi'

            size_display = f"{size:>8}" if isinstance(size, (int, float)) else f"{str(size):>8}"
            # --- END OF NEW LOGIC ---

            # --- FIX: Removed parameter string logic ---

            # --- FIX: Removed 'params_str' from the formatted line ---
            right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size_display} |")
        right_table_lines.append("-" * right_table_width)

        output_lines = []
        max_rows = max(len(left_table_lines), len(right_table_lines))
        separator = " "
        indent = " " * 10
        for i in range(max_rows):
            left_part = left_table_lines[i] if i < len(left_table_lines) else " " * left_table_width
            right_part = indent + right_table_lines[i] if i < len(right_table_lines) else ""
            output_lines.append(f"{left_part}{separator}{right_part}")

        output_lines.append("\n--- Open Positions ---")
        pos_table_width = 100
        output_lines.append("-" * pos_table_width)
        output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
        output_lines.append("-" * pos_table_width)

        perps_positions = self.open_positions.get('perpetuals_account', {}).get('open_positions', [])
        spot_positions = self.open_positions.get('spot_account', {}).get('positions', [])

        if not perps_positions and not spot_positions:
            output_lines.append("No open positions found.")
        else:
            for pos in perps_positions:
                try:
                    pnl = float(pos.get('pnl', 0.0))
                    pnl_str = f"${pnl:,.2f}"
                except (ValueError, TypeError):
                    pnl_str = "Error"

                coin = pos.get('coin') or '-'
                size = pos.get('size') or '-'
                entry_price = pos.get('entry_price') or '-'
                mark_price = pos.get('mark_price') or '-'
                leverage = pos.get('leverage') or '-'

                output_lines.append(f"{'Perps':<10} | {coin:<6} | {size:>15} | {entry_price:>12} | {mark_price:>12} | {pnl_str:>15} | {leverage:>10} |")

            for pos in spot_positions:
                pnl = pos.get('pnl', 'N/A')
                coin = pos.get('coin') or '-'
                balance_size = pos.get('balance_size') or '-'
                output_lines.append(f"{'Spot':<10} | {coin:<6} | {balance_size:>15} | {'-':>12} | {'-':>12} | {pnl:>15} | {'-':>10} |")
        output_lines.append("-" * pos_table_width)

        # --- REMOVED: Background Processes Section ---

        final_output = "\n".join(output_lines)
        print(final_output)
        sys.stdout.flush()

    def run(self):
        """Main loop to read data and refresh the dashboard."""
        while True:
            self.read_prices()
            self.read_market_caps()
            self.read_strategy_statuses()
            self.read_executor_status()
            # --- REMOVED: self.check_process_status() ---
            self.display_dashboard()
            time.sleep(0.5)

if __name__ == "__main__":
    setup_logging('normal', 'MainApp')

    if not os.path.exists(LOGS_DIR):
        os.makedirs(LOGS_DIR)

    processes = {}
    strategy_configs = {}

    try:
        with open(STRATEGY_CONFIG_FILE, 'r') as f:
            strategy_configs = json.load(f)
    except (FileNotFoundError, json.JSONDecodeError) as e:
        logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
        sys.exit(1)

    # --- MODIFIED: Removed dynamic timeframe logic ---
    # --- NEW: Hardcoded timeframes for the resampler ---
    resampler_timeframes = [
        "3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h",
        "12h", "1d", "3d", "1w", "1M", "148m", "37m"
    ]
    logging.info(f"Using hardcoded timeframes for resampler: {resampler_timeframes}")
    # --- END NEW ---

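    # manager.dict() and manager.Queue() return proxy objects that can be shared
    # with the child processes started below: the live feed writes prices into
    # shared_prices, strategy processes push signals onto trade_signal_queue, and
    # the trade executor consumes them.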
    with multiprocessing.Manager() as manager:
        shared_prices = manager.dict()
        trade_signal_queue = manager.Queue()

        processes["Live Market Feed"] = multiprocessing.Process(target=start_live_feed, args=(shared_prices, 'off'), daemon=True)
        processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True)
        # --- MODIFIED: Pass the new hardcoded list to the resampler process ---
        processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(resampler_timeframes,), daemon=True)
        processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)

        processes["Trade Executor"] = multiprocessing.Process(target=run_trade_executor, args=(trade_signal_queue,), daemon=True)
        processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True)

        for name, config in strategy_configs.items():
            if config.get("enabled", False):
                if 'class' not in config:
                    logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.")
                    continue
                proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True)
                processes[f"Strategy: {name}"] = proc

        for name, proc in processes.items():
            logging.info(f"Starting process '{name}'...")
            proc.start()

        time.sleep(3)

        app = MainApp(coins_to_watch=WATCHED_COINS, processes=processes, strategy_configs=strategy_configs, shared_prices=shared_prices)
        try:
            app.run()
        except KeyboardInterrupt:
            logging.info("Shutting down...")
            for proc in processes.values():
                if proc.is_alive():
                    proc.terminate()
            for proc in processes.values():
                if proc.is_alive():
                    proc.join()
            logging.info("Shutdown complete.")
            sys.exit(0)