updated fast orders

This commit is contained in:
2025-11-02 19:56:40 +01:00
parent 93363750ae
commit d650bb5fe2
6 changed files with 932 additions and 354 deletions

0
app.py
View File

165
base_strategy.py Normal file
View File

@ -0,0 +1,165 @@
from abc import ABC, abstractmethod
import pandas as pd
import json
import os
import logging
from datetime import datetime, timezone
import sqlite3
import multiprocessing
import time
from logging_utils import setup_logging
from hyperliquid.info import Info
from hyperliquid.utils import constants
class BaseStrategy(ABC):
"""
An abstract base class that defines the blueprint for all trading strategies.
It provides common functionality like loading data, saving status, and state management.
"""
def __init__(self, strategy_name: str, params: dict, trade_signal_queue: multiprocessing.Queue = None, shared_status: dict = None):
self.strategy_name = strategy_name
self.params = params
self.trade_signal_queue = trade_signal_queue
# Optional multiprocessing.Manager().dict() to hold live status (avoids file IO)
self.shared_status = shared_status
self.coin = params.get("coin", "N/A")
self.timeframe = params.get("timeframe", "N/A")
self.db_path = os.path.join("_data", "market_data.db")
self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
self.current_signal = "INIT"
self.last_signal_change_utc = None
self.signal_price = None
# Note: Logging is set up by the run_strategy function
def load_data(self) -> pd.DataFrame:
"""Loads historical data for the configured coin and timeframe."""
table_name = f"{self.coin}_{self.timeframe}"
periods = [v for k, v in self.params.items() if 'period' in k or '_ma' in k or 'slow' in k or 'fast' in k]
limit = max(periods) + 50 if periods else 500
try:
with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
df = pd.read_sql(query, conn, parse_dates=['datetime_utc'])
if df.empty: return pd.DataFrame()
df.set_index('datetime_utc', inplace=True)
df.sort_index(inplace=True)
return df
except Exception as e:
logging.error(f"Failed to load data from table '{table_name}': {e}")
return pd.DataFrame()
@abstractmethod
def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
"""The core logic of the strategy. Must be implemented by child classes."""
pass
def calculate_signals_and_state(self, df: pd.DataFrame) -> bool:
"""
A wrapper that calls the strategy's signal calculation, determines
the last signal change, and returns True if the signal has changed.
"""
df_with_signals = self.calculate_signals(df)
df_with_signals.dropna(inplace=True)
if df_with_signals.empty:
return False
df_with_signals['position_change'] = df_with_signals['signal'].diff()
last_signal_int = df_with_signals['signal'].iloc[-1]
new_signal_str = "HOLD"
if last_signal_int == 1: new_signal_str = "BUY"
elif last_signal_int == -1: new_signal_str = "SELL"
signal_changed = False
if self.current_signal == "INIT":
if new_signal_str == "BUY": self.current_signal = "INIT_BUY"
elif new_signal_str == "SELL": self.current_signal = "INIT_SELL"
else: self.current_signal = "HOLD"
signal_changed = True
elif new_signal_str != self.current_signal:
self.current_signal = new_signal_str
signal_changed = True
if signal_changed:
last_change_series = df_with_signals[df_with_signals['position_change'] != 0]
if not last_change_series.empty:
last_change_row = last_change_series.iloc[-1]
self.last_signal_change_utc = last_change_row.name.tz_localize('UTC').isoformat()
self.signal_price = last_change_row['close']
return signal_changed
def _save_status(self):
"""Saves the current strategy state to its JSON file."""
status = {
"strategy_name": self.strategy_name,
"current_signal": self.current_signal,
"last_signal_change_utc": self.last_signal_change_utc,
"signal_price": self.signal_price,
"last_checked_utc": datetime.now(timezone.utc).isoformat()
}
# If a shared status dict is provided (Manager.dict()), update it instead of writing files
try:
if self.shared_status is not None:
try:
# store the status under the strategy name for easy lookup
self.shared_status[self.strategy_name] = status
except Exception:
# Manager proxies may not accept nested mutable objects consistently; assign a copy
self.shared_status[self.strategy_name] = dict(status)
else:
with open(self.status_file_path, 'w', encoding='utf-8') as f:
json.dump(status, f, indent=4)
except IOError as e:
logging.error(f"Failed to write status file for {self.strategy_name}: {e}")
def run_polling_loop(self):
"""
The default execution loop for polling-based strategies (e.g., SMAs).
"""
while True:
df = self.load_data()
if df.empty:
logging.warning("No data loaded. Waiting 1 minute...")
time.sleep(60)
continue
signal_changed = self.calculate_signals_and_state(df.copy())
self._save_status()
if signal_changed or self.current_signal == "INIT_BUY" or self.current_signal == "INIT_SELL":
logging.warning(f"New signal detected: {self.current_signal}")
self.trade_signal_queue.put({
"strategy_name": self.strategy_name,
"signal": self.current_signal,
"coin": self.coin,
"signal_price": self.signal_price,
"config": {"agent": self.params.get("agent"), "parameters": self.params}
})
if self.current_signal == "INIT_BUY": self.current_signal = "BUY"
if self.current_signal == "INIT_SELL": self.current_signal = "SELL"
logging.info(f"Current Signal: {self.current_signal}")
time.sleep(60)
def run_event_loop(self):
"""
A placeholder for event-driven (WebSocket) strategies.
Child classes must override this.
"""
logging.error("run_event_loop() is not implemented for this strategy.")
time.sleep(3600) # Sleep for an hour to prevent rapid error loops
def on_fill_message(self, message):
"""
Placeholder for the WebSocket callback.
Child classes must override this.
"""
pass

View File

@ -10,6 +10,8 @@ import sqlite3
import pandas as pd import pandas as pd
from datetime import datetime, timezone from datetime import datetime, timezone
import importlib import importlib
# --- REMOVED: import signal ---
# --- REMOVED: from queue import Empty ---
from logging_utils import setup_logging from logging_utils import setup_logging
# --- Using the new high-performance WebSocket utility for live prices --- # --- Using the new high-performance WebSocket utility for live prices ---
@ -22,7 +24,7 @@ WATCHED_COINS = ["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SU
LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py" LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py"
RESAMPLER_SCRIPT = "resampler.py" RESAMPLER_SCRIPT = "resampler.py"
MARKET_CAP_FETCHER_SCRIPT = "market_cap_fetcher.py" MARKET_CAP_FETCHER_SCRIPT = "market_cap_fetcher.py"
TRADE_EXECUTOR_SCRIPT = "trade_executor.py" # --- REMOVED: trade_executor.py is no longer a script ---
DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py" DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py"
STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json") STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json")
DB_PATH = os.path.join("_data", "market_data.db") DB_PATH = os.path.join("_data", "market_data.db")
@ -46,26 +48,61 @@ def format_market_cap(mc_value):
def run_live_candle_fetcher(): def run_live_candle_fetcher():
"""Target function to run the live_candle_fetcher.py script in a resilient loop.""" """Target function to run the live_candle_fetcher.py script in a resilient loop."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
def handle_shutdown_signal(signum, frame):
nonlocal shutdown_requested
# Use print here as logging may not be set up
print(f"[CandleFetcher] Shutdown signal ({signum}) received. Will stop after current run.")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log") log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log")
while True:
while not shutdown_requested: # <-- MODIFIED
process = None
try: try:
with open(log_file, 'a') as f: with open(log_file, 'a') as f:
# We can't get coins from strategies.json here, so we pass the default list
command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"] command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n") f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n")
subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
# Use Popen instead of run to be non-blocking
process = subprocess.Popen(command, stdout=f, stderr=subprocess.STDOUT)
# Poll the process and check for shutdown request
while process.poll() is None and not shutdown_requested:
time.sleep(0.5) # Poll every 500ms
if shutdown_requested and process.poll() is None:
print(f"[CandleFetcher] Terminating subprocess {LIVE_CANDLE_FETCHER_SCRIPT}...")
process.terminate() # Terminate the child script
process.wait() # Wait for it to exit
print(f"[CandleFetcher] Subprocess terminated.")
except (subprocess.CalledProcessError, Exception) as e: except (subprocess.CalledProcessError, Exception) as e:
if shutdown_requested:
break # Don't restart if we're shutting down
with open(log_file, 'a') as f: with open(log_file, 'a') as f:
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n") f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
f.write(f"Live candle fetcher failed: {e}. Restarting...\n") f.write(f"Live candle fetcher failed: {e}. Restarting...\n")
time.sleep(5) time.sleep(5)
if shutdown_requested:
break # Exit outer loop
print("[CandleFetcher] Live candle fetcher shutting down.")
def run_resampler_job(timeframes_to_generate: list): def run_resampler_job(timeframes_to_generate: list):
"""Defines the job for the resampler, redirecting output to a log file.""" """Defines the job for the resampler, redirecting output to a log file."""
log_file = os.path.join(LOGS_DIR, "resampler.log") log_file = os.path.join(LOGS_DIR, "resampler.log")
try: try:
# --- MODIFIED: No longer needs to check for empty list, coins are from WATCHED_COINS ---
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "normal"] command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "normal"]
with open(log_file, 'a') as f: with open(log_file, 'a') as f:
f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n") f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n")
@ -78,19 +115,34 @@ def run_resampler_job(timeframes_to_generate: list):
def resampler_scheduler(timeframes_to_generate: list): def resampler_scheduler(timeframes_to_generate: list):
"""Schedules the resampler.py script.""" """Schedules the resampler.py script."""
setup_logging('off', 'ResamplerScheduler')
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
if not timeframes_to_generate: def handle_shutdown_signal(signum, frame):
logging.warning("Resampler scheduler started but no timeframes were provided to generate. The process will idle.") nonlocal shutdown_requested
return # Exit the function if there's nothing to do try:
logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
except NameError:
print(f"[ResamplerScheduler] Shutdown signal ({signum}) received. Exiting loop...")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
setup_logging('off', 'ResamplerScheduler')
run_resampler_job(timeframes_to_generate) run_resampler_job(timeframes_to_generate)
# Schedule to run every minute at the :01 second mark # Schedule to run every minute at the :01 second mark
schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate) schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate)
logging.info(f"Resampler scheduled to run every minute at :01 for {timeframes_to_generate}.") logging.info("Resampler scheduled to run every minute at :01.")
while True:
while not shutdown_requested: # <-- MODIFIED
schedule.run_pending() schedule.run_pending()
time.sleep(1) # Check every second to not miss the scheduled time time.sleep(0.5) # Check every 500ms to not miss the scheduled time and be responsive
logging.info("ResamplerScheduler shutting down.")
def run_market_cap_fetcher_job(): def run_market_cap_fetcher_job():
@ -109,35 +161,128 @@ def run_market_cap_fetcher_job():
def market_cap_fetcher_scheduler(): def market_cap_fetcher_scheduler():
"""Schedules the market_cap_fetcher.py script to run daily at a specific UTC time.""" """Schedules the market_cap_fetcher.py script to run daily at a specific UTC time."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
shutdown_requested = False
def handle_shutdown_signal(signum, frame):
nonlocal shutdown_requested
try:
logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
except NameError:
print(f"[MarketCapScheduler] Shutdown signal ({signum}) received. Exiting loop...")
shutdown_requested = True
signal.signal(signal.SIGTERM, handle_shutdown_signal)
signal.signal(signal.SIGINT, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
setup_logging('off', 'MarketCapScheduler') setup_logging('off', 'MarketCapScheduler')
schedule.every().day.at("00:15", "UTC").do(run_market_cap_fetcher_job) schedule.every().day.at("00:15", "UTC").do(run_market_cap_fetcher_job)
while True:
while not shutdown_requested: # <-- MODIFIED
schedule.run_pending() schedule.run_pending()
time.sleep(60) # Sleep for 60 seconds, but check for shutdown flag every second
for _ in range(60):
if shutdown_requested:
break
time.sleep(1)
logging.info("MarketCapScheduler shutting down.")
def run_trade_executor(trade_signal_queue): def run_trade_executor(order_execution_queue: multiprocessing.Queue):
""" """
Target function to run the trade_executor.py script in a resilient loop. Target function to run the TradeExecutor class in a resilient loop.
It passes the shared signal queue to the executor. It now consumes from the order_execution_queue.
""" """
log_file = os.path.join(LOGS_DIR, "trade_executor.log")
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# We can just raise KeyboardInterrupt, as it's handled below
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file_path = os.path.join(LOGS_DIR, "trade_executor.log")
try:
sys.stdout = open(log_file_path, 'a', buffering=1)
sys.stderr = sys.stdout
except Exception as e:
print(f"Failed to open log file for TradeExecutor: {e}")
setup_logging('normal', f"TradeExecutor")
logging.info("\n--- Starting Trade Executor process ---")
while True: while True:
try: try:
with open(log_file, 'a') as f: from trade_executor import TradeExecutor
f.write(f"\n--- Starting Trade Executor at {datetime.now()} ---\n")
executor = TradeExecutor(log_level="normal", order_execution_queue=order_execution_queue)
from trade_executor import TradeExecutor
# --- REVERTED: Call executor.run() directly ---
executor = TradeExecutor(log_level="normal", trade_signal_queue=trade_signal_queue) executor.run()
executor.run() # This will block and run forever
except KeyboardInterrupt:
except (subprocess.CalledProcessError, Exception) as e: logging.info("Trade Executor interrupted. Exiting.")
with open(log_file, 'a') as f: return
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n") except Exception as e:
f.write(f"Trade Executor failed: {e}. Restarting...\n") logging.error(f"Trade Executor failed: {e}. Restarting...\n", exc_info=True)
time.sleep(10) time.sleep(10)
def run_position_manager(trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
"""
Target function to run the PositionManager class in a resilient loop.
Consumes from trade_signal_queue, produces for order_execution_queue.
"""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file_path = os.path.join(LOGS_DIR, "position_manager.log")
try:
sys.stdout = open(log_file_path, 'a', buffering=1)
sys.stderr = sys.stdout
except Exception as e:
print(f"Failed to open log file for PositionManager: {e}")
setup_logging('normal', f"PositionManager")
logging.info("\n--- Starting Position Manager process ---")
while True:
try:
from position_manager import PositionManager
manager = PositionManager(
log_level="normal",
trade_signal_queue=trade_signal_queue,
order_execution_queue=order_execution_queue
)
# --- REVERTED: Call manager.run() directly ---
manager.run()
except KeyboardInterrupt:
logging.info("Position Manager interrupted. Exiting.")
return
except Exception as e:
logging.error(f"Position Manager failed: {e}. Restarting...\n", exc_info=True)
time.sleep(10)
def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue): def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue):
""" """
This function BECOMES the strategy runner. It is executed as a separate This function BECOMES the strategy runner. It is executed as a separate
@ -149,9 +294,22 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
import sys import sys
import time import time
import logging import logging
import signal # <-- ADDED
from logging_utils import setup_logging from logging_utils import setup_logging
from strategies.base_strategy import BaseStrategy from strategies.base_strategy import BaseStrategy
# --- GRACEFUL SHUTDOWN HANDLER ---
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
try:
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
except NameError:
print(f"[Strategy-{strategy_name}] Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
# --- Setup logging to file for this specific process --- # --- Setup logging to file for this specific process ---
log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log") log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
try: try:
@ -183,10 +341,12 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
logging.info(f"Starting POLLING logic loop...") logging.info(f"Starting POLLING logic loop...")
strategy.run_polling_loop() # This is the original blocking call strategy.run_polling_loop() # This is the original blocking call
# --- REVERTED: Added back simple KeyboardInterrupt handler ---
except KeyboardInterrupt: except KeyboardInterrupt:
logging.info("Strategy process stopping.") logging.info(f"Strategy {strategy_name} process stopping.")
return return
except Exception as e: except Exception as e:
# --- REVERTED: Removed specific check for KeyboardInterrupt ---
logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True) logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True)
logging.info("Restarting strategy in 10 seconds...") logging.info("Restarting strategy in 10 seconds...")
time.sleep(10) time.sleep(10)
@ -194,12 +354,30 @@ def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiproc
def run_dashboard_data_fetcher(): def run_dashboard_data_fetcher():
"""Target function to run the dashboard_data_fetcher.py script.""" """Target function to run the dashboard_data_fetcher.py script."""
# --- GRACEFUL SHUTDOWN HANDLER ---
import signal
def handle_shutdown_signal(signum, frame):
# Raise KeyboardInterrupt, as it's handled by the loop
try:
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
except NameError:
print(f"[DashboardDataFetcher] Shutdown signal ({signum}) received. Initiating graceful exit...")
raise KeyboardInterrupt
signal.signal(signal.SIGTERM, handle_shutdown_signal)
# --- END GRACEFUL SHUTDOWN HANDLER ---
log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log") log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log")
while True: while True:
try: try:
with open(log_file, 'a') as f: with open(log_file, 'a') as f:
f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n") f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n")
subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT) subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
except KeyboardInterrupt: # --- MODIFIED: Added to catch interrupt ---
logging.info("Dashboard Data Fetcher stopping.")
break
except (subprocess.CalledProcessError, Exception) as e: except (subprocess.CalledProcessError, Exception) as e:
with open(log_file, 'a') as f: with open(log_file, 'a') as f:
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n") f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
@ -320,39 +498,27 @@ class MainApp:
config_params = self.strategy_configs.get(name, {}).get('parameters', {}) config_params = self.strategy_configs.get(name, {}).get('parameters', {})
# --- NEW ROBUST LOGIC --- # --- FIX: Read coin/size from status file first, fallback to config ---
# 1. Get Timeframe (always from config)
timeframe = config_params.get('timeframe', 'N/A')
# 2. Get Coin: Try status file first (live), then config file (static)
coin = status.get('coin', config_params.get('coin', 'N/A')) coin = status.get('coin', config_params.get('coin', 'N/A'))
# 3. Get Size: Try status file first, then config file # --- FIX: Handle nested 'coins_to_copy' logic for size ---
size_from_status = status.get('size', None) if 'coins_to_copy' in config_params:
size_from_config = config_params.get('size', None) size = status.get('size', 'Multi')
else:
size = config_params.get('size', 'N/A')
size = "N/A" timeframe = config_params.get('timeframe', 'N/A')
if size_from_status is not None:
size = size_from_status # Use live status from copy_trader
elif size_from_config is not None:
size = size_from_config # Use config from simple strategy
elif 'coins_to_copy' in config_params:
# Special case: copy_trader, but status file is old (no 'size' field)
if coin != 'N/A' and coin != 'Multi':
# Try to find size in config if we know the coin from status
# --- SYNTAX FIX: Removed extra ".get(" ---
size = config_params.get('coins_to_copy', {}).get(coin, {}).get('size', 'Multi')
else:
coin = 'Multi' # It's a copy trader, but we don't know the coin
size = 'Multi'
size_display = f"{size:>8}" if isinstance(size, (int, float)) else f"{str(size):>8}"
# --- END OF NEW LOGIC ---
# --- FIX: Removed parameter string logic --- # --- FIX: Removed parameter string logic ---
# --- FIX: Removed 'params_str' from the formatted line --- # --- FIX: Removed 'params_str' from the formatted line ---
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size_display} |")
size_display = f"{size:>8}"
if isinstance(size, (int, float)):
size_display = f"{size:>8.4f}" # Format size to 4 decimal places
# --- END NEW LOGIC ---
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size:>8} |")
right_table_lines.append("-" * right_table_width) right_table_lines.append("-" * right_table_width)
output_lines = [] output_lines = []
@ -370,35 +536,7 @@ class MainApp:
output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |") output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
output_lines.append("-" * pos_table_width) output_lines.append("-" * pos_table_width)
perps_positions = self.open_positions.get('perpetuals_account', {}).get('open_positions', []) # --- REMOVED: Background Processes section ---
spot_positions = self.open_positions.get('spot_account', {}).get('positions', [])
if not perps_positions and not spot_positions:
output_lines.append("No open positions found.")
else:
for pos in perps_positions:
try:
pnl = float(pos.get('pnl', 0.0))
pnl_str = f"${pnl:,.2f}"
except (ValueError, TypeError):
pnl_str = "Error"
coin = pos.get('coin') or '-'
size = pos.get('size') or '-'
entry_price = pos.get('entry_price') or '-'
mark_price = pos.get('mark_price') or '-'
leverage = pos.get('leverage') or '-'
output_lines.append(f"{'Perps':<10} | {coin:<6} | {size:>15} | {entry_price:>12} | {mark_price:>12} | {pnl_str:>15} | {leverage:>10} |")
for pos in spot_positions:
pnl = pos.get('pnl', 'N/A')
coin = pos.get('coin') or '-'
balance_size = pos.get('balance_size') or '-'
output_lines.append(f"{'Spot':<10} | {coin:<6} | {balance_size:>15} | {'-':>12} | {'-':>12} | {pnl:>15} | {'-':>10} |")
output_lines.append("-" * pos_table_width)
# --- REMOVED: Background Processes Section ---
final_output = "\n".join(output_lines) final_output = "\n".join(output_lines)
print(final_output) print(final_output)
@ -422,7 +560,7 @@ if __name__ == "__main__":
os.makedirs(LOGS_DIR) os.makedirs(LOGS_DIR)
processes = {} processes = {}
strategy_configs = {} # --- REVERTED: Removed process groups ---
try: try:
with open(STRATEGY_CONFIG_FILE, 'r') as f: with open(STRATEGY_CONFIG_FILE, 'r') as f:
@ -431,27 +569,37 @@ if __name__ == "__main__":
logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}") logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
sys.exit(1) sys.exit(1)
# --- MODIFIED: Removed dynamic timeframe logic --- # --- FIX: Hardcoded timeframes ---
# --- NEW: Hardcoded timeframes for the resampler --- required_timeframes = [
resampler_timeframes = [
"3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h", "3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h",
"12h", "1d", "3d", "1w", "1M", "148m", "37m" "12h", "1d", "3d", "1w", "1M", "148m", "37m"
] ]
logging.info(f"Using hardcoded timeframes for resampler: {resampler_timeframes}") logging.info(f"Using fixed timeframes for resampler: {required_timeframes}")
# --- END NEW ---
with multiprocessing.Manager() as manager: with multiprocessing.Manager() as manager:
shared_prices = manager.dict() shared_prices = manager.dict()
# --- FIX: Create TWO queues ---
trade_signal_queue = manager.Queue() trade_signal_queue = manager.Queue()
order_execution_queue = manager.Queue()
# --- REVERTED: All processes are daemon=True and in one dict ---
processes["Live Market Feed"] = multiprocessing.Process(target=start_live_feed, args=(shared_prices, 'off'), daemon=True) processes["Live Market Feed"] = multiprocessing.Process(target=start_live_feed, args=(shared_prices, 'off'), daemon=True)
processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True) processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True)
# --- MODIFIED: Pass the new hardcoded list to the resampler process --- processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(list(required_timeframes),), daemon=True)
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(resampler_timeframes,), daemon=True)
processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True) processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)
processes["Trade Executor"] = multiprocessing.Process(target=run_trade_executor, args=(trade_signal_queue,), daemon=True)
processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True) processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True)
processes["Position Manager"] = multiprocessing.Process(
target=run_position_manager,
args=(trade_signal_queue, order_execution_queue),
daemon=True
)
processes["Trade Executor"] = multiprocessing.Process(
target=run_trade_executor,
args=(order_execution_queue,),
daemon=True
)
for name, config in strategy_configs.items(): for name, config in strategy_configs.items():
if config.get("enabled", False): if config.get("enabled", False):
@ -459,7 +607,9 @@ if __name__ == "__main__":
logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.") logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.")
continue continue
proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True) proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True)
processes[f"Strategy: {name}"] = proc processes[f"Strategy: {name}"] = proc # Add to strategy group
# --- REVERTED: Removed combined dict ---
for name, proc in processes.items(): for name, proc in processes.items():
logging.info(f"Starting process '{name}'...") logging.info(f"Starting process '{name}'...")
@ -471,11 +621,47 @@ if __name__ == "__main__":
try: try:
app.run() app.run()
except KeyboardInterrupt: except KeyboardInterrupt:
# --- MODIFIED: Staged shutdown ---
logging.info("Shutting down...") logging.info("Shutting down...")
for proc in processes.values():
if proc.is_alive(): proc.terminate() strategy_procs = {}
for proc in processes.values(): other_procs = {}
if proc.is_alive(): proc.join() for name, proc in processes.items():
if name.startswith("Strategy:"):
strategy_procs[name] = proc
else:
other_procs[name] = proc
# --- 1. Terminate strategy processes ---
logging.info("Shutting down strategy processes first...")
for name, proc in strategy_procs.items():
if proc.is_alive():
logging.info(f"Terminating process: '{name}'...")
proc.terminate()
# --- 2. Wait for 5 seconds ---
logging.info("Waiting 5 seconds for strategies to close...")
time.sleep(5)
# --- 3. Terminate all other processes ---
logging.info("Shutting down remaining core processes...")
for name, proc in other_procs.items():
if proc.is_alive():
logging.info(f"Terminating process: '{name}'...")
proc.terminate()
# --- 4. Join all processes (strategies and others) ---
logging.info("Waiting for all processes to join...")
for name, proc in processes.items(): # Iterate over the original dict to get all
if proc.is_alive():
logging.info(f"Waiting for process '{name}' to join...")
proc.join(timeout=5) # Wait up to 5 seconds
if proc.is_alive():
# If it's still alive, force kill
logging.warning(f"Process '{name}' did not terminate, forcing kill.")
proc.kill()
# --- END MODIFIED ---
logging.info("Shutdown complete.") logging.info("Shutdown complete.")
sys.exit(0) sys.exit(0)

168
position_manager.py Normal file
View File

@ -0,0 +1,168 @@
import logging
import os
import sys
import json
import time
import multiprocessing
import numpy as np # Import numpy to handle np.float64
from datetime import datetime, timezone
from logging_utils import setup_logging
from trade_log import log_trade
class PositionManager:
"""
Listens for strategy signals, READS the current position state,
and sends explicit execution orders to the TradeExecutor.
It does NOT write to the position state file.
"""
def __init__(self, log_level: str, trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
# Note: Logging is set up by the run_position_manager function
self.trade_signal_queue = trade_signal_queue
self.order_execution_queue = order_execution_queue
self.opened_positions_file = os.path.join("_data", "opened_positions.json")
# --- MODIFIED: Load state, but will not save it ---
self.opened_positions = self._load_opened_positions()
if self.opened_positions:
logging.info(f"Position Manager started. Loaded {len(self.opened_positions)} open positions (read-only).")
else:
logging.info("Position Manager started. No initial positions found.")
def _load_opened_positions(self) -> dict:
"""Loads the state of currently managed positions from a JSON file."""
if not os.path.exists(self.opened_positions_file):
return {}
try:
with open(self.opened_positions_file, 'r', encoding='utf-8') as f:
return json.load(f)
except (json.JSONDecodeError, IOError) as e:
logging.error(f"Failed to read '{self.opened_positions_file}': {e}. Starting with empty state.", exc_info=True)
return {}
# --- REMOVED: _save_opened_positions method ---
# (The TradeExecutor is now responsible for saving)
def send_order(self, order_data: dict):
"""Helper function to put a standardized order onto the execution queue."""
logging.info(f"Sending order to executor: {order_data}")
self.order_execution_queue.put(order_data)
def run(self):
"""
Main execution loop. Blocks and waits for a signal from the queue.
Converts strategy signals into execution orders based on current state.
"""
logging.info("Position Manager started. Waiting for signals...")
while True:
try:
trade_signal = self.trade_signal_queue.get()
if not trade_signal:
continue
logging.info(f"Received signal: {trade_signal}")
# --- NEW: Reload the position state on every signal ---
# This ensures we have the most up-to-date state from the Executor
self.opened_positions = self._load_opened_positions()
name = trade_signal['strategy_name']
config = trade_signal['config']
params = config['parameters']
coin = trade_signal['coin'].upper()
desired_signal = trade_signal['signal']
signal_price = trade_signal.get('signal_price')
if isinstance(signal_price, np.float64):
signal_price = float(signal_price)
if not signal_price or signal_price <= 0:
logging.warning(f"[{name}] Signal received with invalid price ({signal_price}). Skipping.")
continue
# --- Handle copy_trader's nested config ---
if 'coins_to_copy' in params:
# ... (omitted for brevity, this logic is correct and unchanged) ...
matching_coin_key = next((k for k in params['coins_to_copy'] if k.upper() == coin), None)
if matching_coin_key:
coin_config = params['coins_to_copy'][matching_coin_key]
params['size'] = coin_config.get('size')
params['leverage_long'] = coin_config.get('leverage_long', 2)
params['leverage_short'] = coin_config.get('leverage_short', 2)
size = params.get('size')
if not size:
logging.error(f"[{name}] Signal received with no 'size'. Skipping trade.")
continue
leverage_long = int(params.get('leverage_long', 2))
leverage_short = int(params.get('leverage_short', 2))
agent_name = (config.get("agent") or "default").lower()
# --- NEW: Stateful decision making ---
position_key = f"{name}_{coin}"
current_position = self.opened_positions.get(position_key)
logging.info(f"[{name}] Processing signal '{desired_signal}'. Current state: {current_position['side'] if current_position else 'FLAT'}")
order_data = {
"agent": agent_name,
"coin": coin,
"limit_px": signal_price,
# --- NEW: Pass all context to the executor ---
"strategy": name,
"position_key": position_key,
"open_price": signal_price,
"open_time_utc": datetime.now(timezone.utc).isoformat(),
"amount": size
}
if desired_signal == "OPEN_LONG":
if current_position:
logging.info(f"[{name}] Ignoring OPEN_LONG signal, already in a position.")
continue
logging.warning(f"[{name}] ACTION: Setting leverage to {leverage_long}x and opening LONG.")
self.send_order({**order_data, "action": "update_leverage", "is_buy": True, "size": leverage_long})
self.send_order({**order_data, "action": "market_open", "is_buy": True, "size": size})
log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "OPEN_SHORT":
if current_position:
logging.info(f"[{name}] Ignoring OPEN_SHORT signal, already in a position.")
continue
logging.warning(f"[{name}] ACTION: Setting leverage to {leverage_short}x and opening SHORT.")
self.send_order({**order_data, "action": "update_leverage", "is_buy": False, "size": leverage_short})
self.send_order({**order_data, "action": "market_open", "is_buy": False, "size": size})
log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "CLOSE_LONG":
if not current_position or current_position['side'] != 'long':
logging.info(f"[{name}] Ignoring CLOSE_LONG signal, not in a long position.")
continue
logging.warning(f"[{name}] ACTION: Closing LONG position.")
self.send_order({**order_data, "action": "market_close", "is_buy": False, "size": size})
log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "CLOSE_SHORT":
if not current_position or current_position['side'] != 'short':
logging.info(f"[{name}] Ignoring CLOSE_SHORT signal, not in a short position.")
continue
logging.warning(f"[{name}] ACTION: Closing SHORT position.")
self.send_order({**order_data, "action": "market_close", "is_buy": True, "size": size})
log_trade(strategy=name, coin=coin, action="CLOSE_SHORT", price=signal_price, size=size, signal=desired_signal)
else:
logging.warning(f"[{name}] Received unhandled signal '{desired_signal}'. No action taken.")
except Exception as e:
logging.error(f"An error occurred in the position manager loop: {e}", exc_info=True)
time.sleep(1)

View File

@ -1,6 +1,7 @@
import logging import logging
import time import time
import json import json
import os
from datetime import datetime, timezone from datetime import datetime, timezone
from hyperliquid.info import Info from hyperliquid.info import Info
from hyperliquid.utils import constants from hyperliquid.utils import constants
@ -12,13 +13,16 @@ class CopyTraderStrategy(BaseStrategy):
An event-driven strategy that monitors a target wallet address and An event-driven strategy that monitors a target wallet address and
copies its trades for a specific set of allowed coins, using copies its trades for a specific set of allowed coins, using
per-coin size and leverage settings. per-coin size and leverage settings.
This strategy is STATEFUL and tracks its own positions.
""" """
def __init__(self, strategy_name: str, params: dict, trade_signal_queue, shared_status: dict = None): def __init__(self, strategy_name: str, params: dict, trade_signal_queue, shared_status: dict = None):
super().__init__(strategy_name, params, trade_signal_queue, shared_status) super().__init__(strategy_name, params, trade_signal_queue, shared_status)
self.target_address = self.params.get("target_address", "").lower() self.target_address = self.params.get("target_address", "").lower()
self.coins_to_copy = self.params.get("coins_to_copy", {}) self.coins_to_copy = self.params.get("coins_to_copy", {})
# Convert all coin keys to uppercase for consistency
self.coins_to_copy = {k.upper(): v for k, v in self.coins_to_copy.items()}
self.allowed_coins = list(self.coins_to_copy.keys()) self.allowed_coins = list(self.coins_to_copy.keys())
if not self.target_address: if not self.target_address:
@ -29,17 +33,63 @@ class CopyTraderStrategy(BaseStrategy):
self.info = None # Will be initialized in the run loop self.info = None # Will be initialized in the run loop
# --- FIX: Set initial state to "WAIT" --- # --- MODIFIED: Load and manage its own position state ---
self.current_signal = "WAIT" self.position_state_file = os.path.join("_data", f"strategy_state_{self.strategy_name}.json")
self.current_positions = self._load_position_state()
# --- MODIFIED: Check if shared_status is None before using it ---
if self.shared_status is None:
logging.warning("No shared_status dictionary provided. Initializing a new one.")
self.shared_status = {}
self.current_signal = self.shared_status.get("current_signal", "WAIT")
self.signal_price = self.shared_status.get("signal_price")
self.last_signal_change_utc = self.shared_status.get("last_signal_change_utc")
# Record the strategy's start time to ignore historical data
self.start_time_utc = datetime.now(timezone.utc) self.start_time_utc = datetime.now(timezone.utc)
logging.info(f"Strategy initialized. Ignoring all trades before {self.start_time_utc.isoformat()}") logging.info(f"Strategy initialized. Ignoring all trades before {self.start_time_utc.isoformat()}")
logging.info(f"Loaded positions: {self.current_positions}")
def _load_position_state(self) -> dict:
"""Loads the strategy's current open positions from a file."""
if os.path.exists(self.position_state_file):
try:
with open(self.position_state_file, 'r') as f:
logging.info(f"Loading existing position state from {self.position_state_file}")
return json.load(f)
except (IOError, json.JSONDecodeError):
logging.warning(f"Could not read position state file {self.position_state_file}. Starting fresh.")
return {} # { "ETH": {"side": "long", "size": 0.01, "entry": 3000}, ... }
def _save_position_state(self):
"""Saves the strategy's current open positions to a file."""
try:
with open(self.position_state_file, 'w') as f:
json.dump(self.current_positions, f, indent=4)
except IOError as e:
logging.error(f"Failed to save position state: {e}")
def calculate_signals(self, df): def calculate_signals(self, df):
# This strategy is event-driven, so it does not use polling-based signal calculation. # This strategy is event-driven, so it does not use polling-based signal calculation.
pass pass
def send_explicit_signal(self, signal: str, coin: str, price: float, trade_params: dict, size: float):
"""Helper to send a formatted signal to the PositionManager."""
config = {
"agent": self.params.get("agent"),
"parameters": trade_params
}
self.trade_signal_queue.put({
"strategy_name": self.strategy_name,
"signal": signal, # e.g., "OPEN_LONG", "CLOSE_SHORT"
"coin": coin,
"signal_price": price,
"config": config,
"size": size # Explicitly pass size
})
logging.info(f"Explicit signal SENT: {signal} {coin} @ {price}, Size: {size}")
def on_fill_message(self, message): def on_fill_message(self, message):
""" """
This is the callback function that gets triggered by the WebSocket This is the callback function that gets triggered by the WebSocket
@ -66,57 +116,82 @@ class CopyTraderStrategy(BaseStrategy):
logging.debug(f"Received {len(fills)} fill(s) for user {user_address}") logging.debug(f"Received {len(fills)} fill(s) for user {user_address}")
for fill in fills: for fill in fills:
# Check if the trade is new or historical # Check if the trade is new or historical
trade_time = datetime.fromtimestamp(fill['time'] / 1000, tz=timezone.utc) trade_time = datetime.fromtimestamp(fill['time'] / 1000, tz=timezone.utc)
if trade_time < self.start_time_utc: if trade_time < self.start_time_utc:
logging.info(f"Ignoring stale/historical trade from {trade_time.isoformat()}") logging.info(f"Ignoring stale/historical trade from {trade_time.isoformat()}")
continue
coin = fill.get('coin').upper()
if coin in self.allowed_coins:
side = fill.get('side')
price = float(fill.get('px'))
fill_size = float(fill.get('sz'))
# Get our strategy's configured trade size for this coin
coin_config = self.coins_to_copy.get(coin)
if not coin_config or not coin_config.get("size"):
logging.warning(f"No trade size specified for {coin}. Ignoring fill.")
continue continue
coin = fill.get('coin') strategy_trade_size = coin_config.get("size")
if coin in self.allowed_coins: # Prepare config for the signal
side = fill.get('side') trade_params = self.params.copy()
price = float(fill.get('px')) trade_params.update(coin_config)
signal = "HOLD" # Get our current position state for this coin
if side == "B": current_local_pos = self.current_positions.get(coin)
signal = "BUY" current_local_side = current_local_pos.get("side") if current_local_pos else None
elif side == "A":
signal = "SELL"
coin_config = self.coins_to_copy.get(coin) signal_sent = False
if not coin_config or not coin_config.get("size"): if side == "B": # Target bought
logging.warning(f"No trade size specified for {coin}. Ignoring fill.") if current_local_side == "short":
continue # Flip: Close short, then open long
logging.warning(f"[{coin}] Target BOUGHT, we are SHORT. Flipping to LONG.")
# --- 1. Create the trade-specific config --- self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, current_local_pos.get("size"))
trade_params = self.params.copy() self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, strategy_trade_size)
trade_params.update(coin_config) self.current_positions[coin] = {"side": "long", "size": strategy_trade_size, "entry": price}
trade_config = { signal_sent = True
"agent": self.params.get("agent"), elif current_local_side is None:
"parameters": trade_params # New: Open long
} logging.warning(f"[{coin}] Target BOUGHT, we are FLAT. Opening LONG.")
self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, strategy_trade_size)
# --- 2. (PRIORITY) Put the signal on the queue for the executor --- self.current_positions[coin] = {"side": "long", "size": strategy_trade_size, "entry": price}
self.trade_signal_queue.put({ signal_sent = True
"strategy_name": self.strategy_name, else: # We are already long
"signal": signal, logging.info(f"[{coin}] Target BOUGHT, we are already LONG. Ignoring.")
"coin": coin,
"signal_price": price, elif side == "A": # Target sold
"config": trade_config if current_local_side == "long":
}) # Flip: Close long, then open short
logging.warning(f"[{coin}] Target SOLD, we are LONG. Flipping to SHORT.")
# --- 3. (Secondary) Update internal state and log --- self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, current_local_pos.get("size"))
self.current_signal = signal self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, strategy_trade_size)
self.current_positions[coin] = {"side": "short", "size": strategy_trade_size, "entry": price}
signal_sent = True
elif current_local_side is None:
# New: Open short
logging.warning(f"[{coin}] Target SOLD, we are FLAT. Opening SHORT.")
self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, strategy_trade_size)
self.current_positions[coin] = {"side": "short", "size": strategy_trade_size, "entry": price}
signal_sent = True
else: # We are already short
logging.info(f"[{coin}] Target SOLD, we are already SHORT. Ignoring.")
if signal_sent:
# Update dashboard status
self.current_signal = f"{side} @ {coin}"
self.signal_price = price self.signal_price = price
self.last_signal_change_utc = trade_time.isoformat() self.last_signal_change_utc = trade_time.isoformat()
self._save_status() # Update the dashboard status file # --- MODIFIED: Save BOTH status files ---
self._save_status() # For dashboard
logging.warning(f"Copy trade signal SENT for {coin}: {signal} @ {price}, Size: {coin_config['size']}") self._save_position_state() # For our internal tracking
logging.info(f"Source trade logged: {json.dumps(fill)}")
else: logging.info(f"Source trade logged: {json.dumps(fill)}")
logging.info(f"Ignoring fill for unmonitored coin: {coin}") else:
logging.info(f"Ignoring fill for unmonitored coin: {coin}")
except Exception as e: except Exception as e:
logging.error(f"Error in on_fill_message: {e}", exc_info=True) logging.error(f"Error in on_fill_message: {e}", exc_info=True)
@ -142,37 +217,81 @@ class CopyTraderStrategy(BaseStrategy):
This method overrides the default polling loop. It establishes a This method overrides the default polling loop. It establishes a
persistent WebSocket connection and runs a watchdog to ensure persistent WebSocket connection and runs a watchdog to ensure
it stays connected. it stays connected.
It also catches KeyboardInterrupt to gracefully shut down positions.
""" """
if not self._connect_and_subscribe(): try:
# If connection fails on start, wait 60s before letting the process restart if not self._connect_and_subscribe():
time.sleep(60) # If connection fails on start, wait 60s before letting the process restart
return time.sleep(60)
return
# --- ADDED: Save the initial "WAIT" status --- # Save the initial "WAIT" status
self._save_status() self._save_status()
while True: while True:
try: try:
time.sleep(15) # Check the connection every 15 seconds time.sleep(15) # Check the connection every 15 seconds
if self.info is None or not self.info.ws_manager.is_alive():
logging.error(f"WebSocket connection lost. Attempting to reconnect...")
if self.info and self.info.ws_manager: if self.info is None or not self.info.ws_manager.is_alive():
try: logging.error(f"WebSocket connection lost. Attempting to reconnect...")
self.info.ws_manager.stop()
except Exception as e: if self.info and self.info.ws_manager:
logging.error(f"Error stopping old ws_manager: {e}") try:
self.info.ws_manager.stop()
if not self._connect_and_subscribe(): except Exception as e:
logging.error("Reconnect failed, will retry in 15s.") logging.error(f"Error stopping old ws_manager: {e}")
if not self._connect_and_subscribe():
logging.error("Reconnect failed, will retry in 15s.")
else:
logging.info("Successfully reconnected to WebSocket.")
self._save_status()
else: else:
logging.info("Successfully reconnected to WebSocket.") logging.debug("Watchdog check: WebSocket connection is active.")
# After reconnecting, save the current status again
self._save_status() except Exception as e:
else: logging.error(f"An error occurred in the watchdog loop: {e}", exc_info=True)
logging.debug("Watchdog check: WebSocket connection is active.")
except KeyboardInterrupt: # --- THIS IS THE GRACEFUL SHUTDOWN LOGIC ---
except Exception as e: logging.warning(f"Shutdown signal received. Closing all open positions for '{self.strategy_name}'...")
logging.error(f"An error occurred in the watchdog loop: {e}", exc_info=True)
# Use a copy of the items to avoid runtime modification errors
for coin, position in list(self.current_positions.items()):
current_side = position.get("side")
trade_size = position.get("size")
if not current_side or not trade_size:
continue
# Find the config for this coin
coin_config = self.coins_to_copy.get(coin.upper(), {})
trade_params = self.params.copy()
trade_params.update(coin_config)
# Use the last entry price as a placeholder for the market close order
price = position.get("entry", 1) # Use 1 as a failsafe
if current_side == "long":
logging.warning(f"Sending CLOSE_LONG for {coin}, {price}, {trade_size}...")
#self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, trade_size)
#del self.current_positions[coin] # Assume it will close
elif current_side == "short":
logging.warning(f"Sending CLOSE_SHORT for {coin}, {price}, {trade_size} ...")
#self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, trade_size)
#del self.current_positions[coin] # Assume it will close
self._save_position_state() # Save the new empty state
logging.info("All closing signals sent. Exiting strategy.")
except Exception as e:
logging.error(f"An unhandled error occurred in run_event_loop: {e}", exc_info=True)
finally:
if self.info and self.info.ws_manager and self.info.ws_manager.is_alive():
try:
self.info.ws_manager.stop()
logging.info("WebSocket connection stopped.")
except Exception as e:
logging.error(f"Error stopping ws_manager on exit: {e}")

View File

@ -4,6 +4,7 @@ import os
import sys import sys
import json import json
import time import time
# --- REVERTED: Removed math import ---
from datetime import datetime from datetime import datetime
import multiprocessing import multiprocessing
@ -14,48 +15,44 @@ from hyperliquid.utils import constants
from dotenv import load_dotenv from dotenv import load_dotenv
from logging_utils import setup_logging from logging_utils import setup_logging
from trade_log import log_trade
# Load environment variables from a .env file
load_dotenv() load_dotenv()
class TradeExecutor: class TradeExecutor:
""" """
Monitors a shared queue for strategy signals and executes trades. Executes orders from a queue and, upon API success,
This script is now a dedicated, event-driven consumer. updates the shared 'opened_positions.json' state file.
It is the single source of truth for position state.
""" """
def __init__(self, log_level: str, trade_signal_queue: multiprocessing.Queue, shared_executor_status: dict = None): def __init__(self, log_level: str, order_execution_queue: multiprocessing.Queue):
setup_logging(log_level, 'TradeExecutor') # Note: Logging is set up by the run_trade_executor function
self.trade_signal_queue = trade_signal_queue self.order_execution_queue = order_execution_queue
# Optional Manager.dict() to store live managed positions and other executor status
self.shared_executor_status = shared_executor_status
self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS") self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
if not self.vault_address: if not self.vault_address:
logging.error("MAIN_WALLET_ADDRESS not set.") logging.error("MAIN_WALLET_ADDRESS not set.")
# --- FIX: Raise an exception instead of sys.exit() --- sys.exit(1)
# This allows the main_app process manager to catch and log the error.
raise ValueError("MAIN_WALLET_ADDRESS not set in environment.")
# --- FIX: Corrected constant name from MAIN_NET_API_URL to MAINNET_API_URL ---
self.info = Info(constants.MAINNET_API_URL, skip_ws=True) self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
self.exchanges = self._load_agents() self.exchanges = self._load_agents()
if not self.exchanges: if not self.exchanges:
logging.error("No trading agents found in .env file.") logging.error("No trading agents found in .env file.")
# --- FIX: Raise an exception instead of sys.exit() --- sys.exit(1)
raise ValueError("No trading agents found in .env file. Check AGENT_PRIVATE_KEY or _AGENT_PK vars.")
# --- REVERTED: Removed asset_meta loading ---
# self.asset_meta = self._load_asset_metadata()
# --- NEW: State management logic ---
self.opened_positions_file = os.path.join("_data", "opened_positions.json")
self.opened_positions = self._load_opened_positions()
logging.info(f"Trade Executor started. Loaded {len(self.opened_positions)} positions.")
self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
self.managed_positions = self._load_managed_positions()
logging.info(f"TradeExecutor initialized. Agents available: {list(self.exchanges.keys())}")
def _load_agents(self) -> dict: def _load_agents(self) -> dict:
""" # ... (omitted for brevity, this logic is correct and unchanged) ...
Discovers and initializes agents by scanning for environment variables.
"""
exchanges = {} exchanges = {}
logging.info("Discovering agents from environment variables...") logging.info("Discovering agents from environment variables...")
for env_var, private_key in os.environ.items(): for env_var, private_key in os.environ.items():
@ -73,179 +70,122 @@ class TradeExecutor:
except Exception as e: except Exception as e:
logging.error(f"Failed to initialize agent '{agent_name}': {e}") logging.error(f"Failed to initialize agent '{agent_name}': {e}")
return exchanges return exchanges
# --- REVERTED: Removed asset metadata loading ---
# def _load_asset_metadata(self) -> dict: ...
def _load_managed_positions(self) -> dict: # --- NEW: Position state save/load methods ---
"""Loads the state of which strategy manages which position.""" def _load_opened_positions(self) -> dict:
# Prefer shared in-memory state when available """Loads the state of currently managed positions from a JSON file."""
if not os.path.exists(self.opened_positions_file):
return {}
try: try:
if self.shared_executor_status is not None: with open(self.opened_positions_file, 'r', encoding='utf-8') as f:
mgr = self.shared_executor_status.get('managed_positions') if isinstance(self.shared_executor_status, dict) else None return json.load(f)
if mgr: except (json.JSONDecodeError, IOError) as e:
logging.info("Loading managed positions from shared executor status.") logging.error(f"Failed to read '{self.opened_positions_file}': {e}. Starting with empty state.", exc_info=True)
return dict(mgr) return {}
except Exception:
logging.debug("Unable to read managed positions from shared status. Falling back to file.")
if os.path.exists(self.managed_positions_path): def _save_opened_positions(self):
try: """Saves the current state of managed positions to a JSON file."""
with open(self.managed_positions_path, 'r') as f:
logging.info("Loading existing managed positions state from file.")
return json.load(f)
except (IOError, json.JSONDecodeError):
logging.warning("Could not read managed positions file. Starting fresh.")
return {}
def _save_managed_positions(self):
"""Saves the current state of managed positions."""
try: try:
if self.shared_executor_status is not None: with open(self.opened_positions_file, 'w', encoding='utf-8') as f:
try: json.dump(self.opened_positions, f, indent=4)
# store under a known key logging.debug(f"Successfully saved {len(self.opened_positions)} positions to '{self.opened_positions_file}'")
self.shared_executor_status['managed_positions'] = dict(self.managed_positions)
except Exception:
# fallback: try direct assignment
self.shared_executor_status['managed_positions'] = self.managed_positions
else:
with open(self.managed_positions_path, 'w') as f:
json.dump(self.managed_positions, f, indent=4)
except IOError as e: except IOError as e:
logging.error(f"Failed to save managed positions state: {e}") logging.error(f"Failed to write to '{self.opened_positions_file}': {e}", exc_info=True)
# --- REVERTED: Removed tick rounding function ---
# def _round_to_tick(self, price, tick_size): ...
def run(self): def run(self):
""" """
Main execution loop. Blocks and waits for a signal from the queue. Main execution loop. Waits for an order and updates state on success.
""" """
logging.info("Trade Executor started. Waiting for signals...") logging.info("Trade Executor started. Waiting for orders...")
while True: while True:
try: try:
trade_signal = self.trade_signal_queue.get() order = self.order_execution_queue.get()
if not trade_signal: if not order:
continue continue
logging.info(f"Received signal: {trade_signal}") logging.info(f"Received order: {order}")
# Basic validation and debug information to help trace gaps agent_name = order['agent']
if 'config' not in trade_signal: action = order['action']
logging.error(f"Signal missing 'config' key. Ignoring: {trade_signal}") coin = order['coin']
continue is_buy = order['is_buy']
if 'strategy_name' not in trade_signal: size = order['size']
logging.error(f"Signal missing 'strategy_name' key. Ignoring: {trade_signal}") limit_px = order.get('limit_px')
continue
# Special command handling
if isinstance(trade_signal, dict) and trade_signal.get('_cmd') == 'CLOSE_ALL':
target_agent = trade_signal.get('agent')
logging.warning(f"Received CLOSE_ALL command for agent: {target_agent}")
if not target_agent:
logging.error("CLOSE_ALL command missing 'agent' field. Ignoring.")
continue
# Iterate managed positions and close those opened by the target agent
to_close = [s for s, v in self.managed_positions.items() if v.get('agent') == target_agent]
if not to_close:
logging.info(f"No managed positions found for agent '{target_agent}'.")
continue
for sname in to_close:
pos = self.managed_positions.get(sname)
if not pos:
continue
coin = pos.get('coin')
side = pos.get('side')
size = pos.get('size')
# Determine is_buy to neutralize the position
is_buy = True if side == 'short' else False
logging.warning(f"[CLOSE_ALL] Closing {side} position for strategy {sname}, coin {coin}, size {size}")
try:
# Use the agent's exchange if available
exch = self.exchanges.get(target_agent)
if exch:
exch.market_open(coin, is_buy, size, None, 0.01)
else:
logging.error(f"Exchange object for agent '{target_agent}' not found. Skipping live close for {sname}.")
except Exception as e:
logging.error(f"Error closing position for {sname}: {e}")
# remove from managed positions regardless to avoid stuck state
try:
del self.managed_positions[sname]
except KeyError:
pass
self._save_managed_positions()
logging.info(f"CLOSE_ALL for agent '{target_agent}' completed.")
continue
name = trade_signal['strategy_name']
config = trade_signal['config']
params = config.get('parameters', {})
coin = trade_signal['coin']
desired_signal = trade_signal['signal']
status = trade_signal
size = params.get('size')
if size is None:
logging.error(f"[{name}] No 'size' in parameters: {params}. Skipping.")
continue
leverage_long = int(params.get('leverage_long', 2))
leverage_short = int(params.get('leverage_short', 2))
current_position = self.managed_positions.get(name)
agent_name = (config.get("agent") or "default").lower()
exchange_to_use = self.exchanges.get(agent_name) exchange_to_use = self.exchanges.get(agent_name)
if not exchange_to_use: if not exchange_to_use:
logging.error(f"[{name}] Agent '{agent_name}' not found. Available agents: {list(self.exchanges.keys())}. Skipping trade.") logging.error(f"Agent '{agent_name}' not found. Skipping order.")
continue continue
# --- State Machine Logic (now runs instantly on signal) ---
if desired_signal == "BUY" or desired_signal == "INIT_BUY":
if not current_position:
logging.warning(f"[{name}] ACTION: Setting leverage to {leverage_long}x and opening LONG for {coin}.")
exchange_to_use.update_leverage(leverage_long, coin)
exchange_to_use.market_open(coin, True, size, None, 0.01)
self.managed_positions[name] = {"coin": coin, "side": "long", "size": size}
log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=status.get('signal_price', 0), size=size, signal=desired_signal)
elif current_position['side'] == 'short':
logging.warning(f"[{name}] ACTION: Closing SHORT and opening LONG for {coin} with {leverage_long}x leverage.")
exchange_to_use.update_leverage(leverage_long, coin)
# 1. Close the short by buying back (this is a market_open, but is_buy=True)
exchange_to_use.market_open(coin, True, current_position['size'], None, 0.01)
log_trade(strategy=name, coin=coin, action="CLOSE_SHORT", price=status.get('signal_price', 0), size=current_position['size'], signal=desired_signal)
# 2. Open the new long
exchange_to_use.market_open(coin, True, size, None, 0.01)
self.managed_positions[name] = {"coin": coin, "side": "long", "size": size}
log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=status.get('signal_price', 0), size=size, signal=desired_signal)
elif desired_signal == "SELL" or desired_signal == "INIT_SELL":
if not current_position:
logging.warning(f"[{name}] ACTION: Setting leverage to {leverage_short}x and opening SHORT for {coin}.")
exchange_to_use.update_leverage(leverage_short, coin)
exchange_to_use.market_open(coin, False, size, None, 0.01)
self.managed_positions[name] = {"coin": coin, "side": "short", "size": size}
log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=status.get('signal_price', 0), size=size, signal=desired_signal)
elif current_position['side'] == 'long':
logging.warning(f"[{name}] ACTION: Closing LONG and opening SHORT for {coin} with {leverage_short}x leverage.")
exchange_to_use.update_leverage(leverage_short, coin)
# 1. Close the long by selling
exchange_to_use.market_open(coin, False, current_position['size'], None, 0.01)
log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=status.get('signal_price', 0), size=current_position['size'], signal=desired_signal)
# 2. Open the new short
exchange_to_use.market_open(coin, False, size, None, 0.01)
self.managed_positions[name] = {"coin": coin, "side": "short", "size": size}
# --- FIX: Corrected typo from 'signal.desired_signal' to 'signal=desired_signal' ---
log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=status.get('signal_price', 0), size=size, signal=desired_signal)
elif desired_signal == "FLAT": response = None
if current_position:
logging.warning(f"[{name}] ACTION: Close {current_position['side']} position for {coin}.")
is_buy = current_position['side'] == 'short'
exchange_to_use.market_open(coin, is_buy, current_position['size'], None, 0.01)
del self.managed_positions[name]
log_trade(strategy=name, coin=coin, action=f"CLOSE_{current_position['side'].upper()}", price=status.get('signal_price', 0), size=current_position['size'], signal=desired_signal)
self._save_managed_positions() if action == "market_open" or action == "market_close":
reduce_only = (action == "market_close")
log_action = "MARKET CLOSE" if reduce_only else "MARKET OPEN"
logging.warning(f"ACTION: {log_action} {coin} {'BUY' if is_buy else 'SELL'} {size}")
# --- REVERTED: Removed all slippage and rounding logic ---
# The raw limit_px from the order is now used directly
final_price = limit_px
logging.info(f"[{agent_name}] Using raw price for {coin}: {final_price}")
order_type = {"limit": {"tif": "Ioc"}}
# --- REVERTED: Uses final_price (which is just limit_px) ---
response = exchange_to_use.order(coin, is_buy, size, final_price, order_type, reduce_only=reduce_only)
logging.info(f"Market order response: {response}")
# --- NEW: STATE UPDATE ON SUCCESS ---
if response.get("status") == "ok":
response_data = response.get("response", {}).get("data", {})
if response_data and "statuses" in response_data:
# Check if the order status contains an error
if "error" not in response_data["statuses"][0]:
position_key = order['position_key']
if action == "market_open":
# Add to state
self.opened_positions[position_key] = {
"strategy": order['strategy'],
"coin": coin,
"side": "long" if is_buy else "short",
"open_time_utc": order['open_time_utc'],
"open_price": order['open_price'],
"amount": order['amount']
}
logging.info(f"Successfully opened position {position_key}. Saving state.")
elif action == "market_close":
# Remove from state
if position_key in self.opened_positions:
del self.opened_positions[position_key]
logging.info(f"Successfully closed position {position_key}. Saving state.")
else:
logging.warning(f"Received close confirmation for {position_key}, but it was not in state.")
self._save_opened_positions() # Save state to disk
else:
logging.error(f"API Error for {action}: {response_data['statuses'][0]['error']}")
else:
logging.error(f"Unexpected API response format: {response}")
else:
logging.error(f"API call failed, status: {response.get('status')}")
elif action == "update_leverage":
leverage = int(size)
logging.warning(f"ACTION: UPDATE LEVERAGE {coin} to {leverage}x")
response = exchange_to_use.update_leverage(leverage, coin)
logging.info(f"Update leverage response: {response}")
else:
logging.warning(f"Received unknown action: {action}")
except Exception as e: except Exception as e:
logging.error(f"An error occurred in the main executor loop: {e}", exc_info=True) logging.error(f"An error occurred in the main executor loop: {e}", exc_info=True)
time.sleep(1) time.sleep(1)
# This script is no longer run directly, but is called by main_app.py