Compare commits

...

10 Commits

SHA1 Message Date
8c35bc2fae bid, ask, last traded price 2025-11-04 13:34:49 +01:00
dfec8dcf01 size taken from monitored wallet 2025-11-02 22:38:31 +01:00
dfbb96d2b1 updated fast orders 2025-11-02 19:56:40 +01:00
91e2fc6021 fixes, old way to handle strategies 2025-10-27 21:54:33 +01:00
576f1d6744 new strategies 2025-10-25 21:51:25 +02:00
e4d7f85ea7 detailed info about wallets 2025-10-25 19:59:13 +02:00
355b94968f market cap fixes 2025-10-25 19:58:52 +02:00
d1b72b09e8 timestamp_ms column added to all tables as primary key 2025-10-22 22:22:13 +02:00
4cdf66269a wallet info 2025-10-21 23:53:06 +02:00
227f44f22d save market cap of all coins 2025-10-21 23:52:32 +02:00
19 changed files with 3184 additions and 431 deletions


@@ -15,13 +15,13 @@ from logging_utils import setup_logging
 # --- Configuration ---
 DEFAULT_ADDRESSES_TO_WATCH = [
     #"0xd4c1f7e8d876c4749228d515473d36f919583d1d",
-    "0x0fd468a73084daa6ea77a9261e40fdec3e67e0c7",
+    "0x47930c76790c865217472f2ddb4d14c640ee450a",
     # "0x4d69495d16fab95c3c27b76978affa50301079d0",
     # "0x09bc1cf4d9f0b59e1425a8fde4d4b1f7d3c9410d",
     "0xc6ac58a7a63339898aeda32499a8238a46d88e84",
     "0xa8ef95dbd3db55911d3307930a84b27d6e969526",
     # "0x4129c62faf652fea61375dcd9ca8ce24b2bb8b95",
-    "0xbf1935fe7ab6d0aa3ee8d3da47c2f80e215b2a1c",
+    "0x32885a6adac4375858E6edC092EfDDb0Ef46484C",
 ]
 MAX_FILLS_TO_DISPLAY = 10
 LOGS_DIR = "_logs"

app.py (0 lines shown)

base_strategy.py (new file, 165 lines)

@@ -0,0 +1,165 @@
from abc import ABC, abstractmethod
import pandas as pd
import json
import os
import logging
from datetime import datetime, timezone
import sqlite3
import multiprocessing
import time
from logging_utils import setup_logging
from hyperliquid.info import Info
from hyperliquid.utils import constants


class BaseStrategy(ABC):
    """
    An abstract base class that defines the blueprint for all trading strategies.
    It provides common functionality like loading data, saving status, and state management.
    """

    def __init__(self, strategy_name: str, params: dict, trade_signal_queue: multiprocessing.Queue = None, shared_status: dict = None):
        self.strategy_name = strategy_name
        self.params = params
        self.trade_signal_queue = trade_signal_queue
        # Optional multiprocessing.Manager().dict() to hold live status (avoids file IO)
        self.shared_status = shared_status
        self.coin = params.get("coin", "N/A")
        self.timeframe = params.get("timeframe", "N/A")
        self.db_path = os.path.join("_data", "market_data.db")
        self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
        self.current_signal = "INIT"
        self.last_signal_change_utc = None
        self.signal_price = None
        # Note: Logging is set up by the run_strategy function

    def load_data(self) -> pd.DataFrame:
        """Loads historical data for the configured coin and timeframe."""
        table_name = f"{self.coin}_{self.timeframe}"
        periods = [v for k, v in self.params.items() if 'period' in k or '_ma' in k or 'slow' in k or 'fast' in k]
        limit = max(periods) + 50 if periods else 500
        try:
            with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
                query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
                df = pd.read_sql(query, conn, parse_dates=['datetime_utc'])
                if df.empty: return pd.DataFrame()
                df.set_index('datetime_utc', inplace=True)
                df.sort_index(inplace=True)
                return df
        except Exception as e:
            logging.error(f"Failed to load data from table '{table_name}': {e}")
            return pd.DataFrame()

    @abstractmethod
    def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
        """The core logic of the strategy. Must be implemented by child classes."""
        pass

    def calculate_signals_and_state(self, df: pd.DataFrame) -> bool:
        """
        A wrapper that calls the strategy's signal calculation, determines
        the last signal change, and returns True if the signal has changed.
        """
        df_with_signals = self.calculate_signals(df)
        df_with_signals.dropna(inplace=True)
        if df_with_signals.empty:
            return False
        df_with_signals['position_change'] = df_with_signals['signal'].diff()
        last_signal_int = df_with_signals['signal'].iloc[-1]
        new_signal_str = "HOLD"
        if last_signal_int == 1: new_signal_str = "BUY"
        elif last_signal_int == -1: new_signal_str = "SELL"
        signal_changed = False
        if self.current_signal == "INIT":
            if new_signal_str == "BUY": self.current_signal = "INIT_BUY"
            elif new_signal_str == "SELL": self.current_signal = "INIT_SELL"
            else: self.current_signal = "HOLD"
            signal_changed = True
        elif new_signal_str != self.current_signal:
            self.current_signal = new_signal_str
            signal_changed = True
        if signal_changed:
            last_change_series = df_with_signals[df_with_signals['position_change'] != 0]
            if not last_change_series.empty:
                last_change_row = last_change_series.iloc[-1]
                self.last_signal_change_utc = last_change_row.name.tz_localize('UTC').isoformat()
                self.signal_price = last_change_row['close']
        return signal_changed

    def _save_status(self):
        """Saves the current strategy state to its JSON file."""
        status = {
            "strategy_name": self.strategy_name,
            "current_signal": self.current_signal,
            "last_signal_change_utc": self.last_signal_change_utc,
            "signal_price": self.signal_price,
            "last_checked_utc": datetime.now(timezone.utc).isoformat()
        }
        # If a shared status dict is provided (Manager.dict()), update it instead of writing files
        try:
            if self.shared_status is not None:
                try:
                    # Store the status under the strategy name for easy lookup
                    self.shared_status[self.strategy_name] = status
                except Exception:
                    # Manager proxies may not accept nested mutable objects consistently; assign a copy
                    self.shared_status[self.strategy_name] = dict(status)
            else:
                with open(self.status_file_path, 'w', encoding='utf-8') as f:
                    json.dump(status, f, indent=4)
        except IOError as e:
            logging.error(f"Failed to write status file for {self.strategy_name}: {e}")

    def run_polling_loop(self):
        """
        The default execution loop for polling-based strategies (e.g., SMAs).
        """
        while True:
            df = self.load_data()
            if df.empty:
                logging.warning("No data loaded. Waiting 1 minute...")
                time.sleep(60)
                continue
            signal_changed = self.calculate_signals_and_state(df.copy())
            self._save_status()
            if signal_changed or self.current_signal == "INIT_BUY" or self.current_signal == "INIT_SELL":
                logging.warning(f"New signal detected: {self.current_signal}")
                self.trade_signal_queue.put({
                    "strategy_name": self.strategy_name,
                    "signal": self.current_signal,
                    "coin": self.coin,
                    "signal_price": self.signal_price,
                    "config": {"agent": self.params.get("agent"), "parameters": self.params}
                })
                if self.current_signal == "INIT_BUY": self.current_signal = "BUY"
                if self.current_signal == "INIT_SELL": self.current_signal = "SELL"
            logging.info(f"Current Signal: {self.current_signal}")
            time.sleep(60)

    def run_event_loop(self):
        """
        A placeholder for event-driven (WebSocket) strategies.
        Child classes must override this.
        """
        logging.error("run_event_loop() is not implemented for this strategy.")
        time.sleep(3600)  # Sleep for an hour to prevent rapid error loops

    def on_fill_message(self, message):
        """
        Placeholder for the WebSocket callback.
        Child classes must override this.
        """
        pass

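Editor's note: a child strategy only has to implement calculate_signals() and emit a 'signal' column of 1/-1 (load_data() picks its lookback from parameter names containing 'period', 'fast', or 'slow'). A minimal sketch of such a subclass; the class name, parameter names, and SMA logic here are illustrative, not part of this commit:

from strategies.base_strategy import BaseStrategy  # module path as imported by the main app below
import pandas as pd

class SmaCrossStrategy(BaseStrategy):
    """Hypothetical example: signal 1 when the fast SMA is above the slow SMA, else -1."""

    def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
        fast = self.params.get("fast_period", 10)   # illustrative parameter names
        slow = self.params.get("slow_period", 50)
        df['fast_ma'] = df['close'].rolling(fast).mean()
        df['slow_ma'] = df['close'].rolling(slow).mean()
        # BaseStrategy expects a 'signal' column: 1 = BUY, -1 = SELL
        df['signal'] = (df['fast_ma'] > df['slow_ma']).astype(int) * 2 - 1
        return df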
coin_id_map.py (new file, 95 lines)

@@ -0,0 +1,95 @@
import os
import json
import logging
import requests
from hyperliquid.info import Info
from hyperliquid.utils import constants
from logging_utils import setup_logging


def update_coin_mapping():
    """
    Fetches all assets from Hyperliquid and all coins from CoinGecko,
    then creates and saves a mapping from the Hyperliquid symbol to the
    CoinGecko ID using a robust matching algorithm.
    """
    setup_logging('normal', 'CoinMapUpdater')
    logging.info("Starting coin mapping update process...")

    # --- 1. Fetch all assets from Hyperliquid ---
    try:
        logging.info("Fetching assets from Hyperliquid...")
        info = Info(constants.MAINNET_API_URL, skip_ws=True)
        meta, asset_contexts = info.meta_and_asset_ctxs()
        hyperliquid_assets = meta['universe']
        logging.info(f"Found {len(hyperliquid_assets)} assets on Hyperliquid.")
    except Exception as e:
        logging.error(f"Failed to fetch assets from Hyperliquid: {e}")
        return

    # --- 2. Fetch all coins from CoinGecko ---
    try:
        logging.info("Fetching coin list from CoinGecko...")
        response = requests.get("https://api.coingecko.com/api/v3/coins/list")
        response.raise_for_status()
        coingecko_coins = response.json()
        # Create more robust lookup tables
        cg_symbol_lookup = {coin['symbol'].upper(): coin['id'] for coin in coingecko_coins}
        cg_name_lookup = {coin['name'].upper(): coin['id'] for coin in coingecko_coins}
        logging.info(f"Found {len(coingecko_coins)} coins on CoinGecko.")
    except requests.exceptions.RequestException as e:
        logging.error(f"Failed to fetch coin list from CoinGecko: {e}")
        return

    # --- 3. Create the mapping ---
    final_mapping = {}
    # Use manual overrides for critical coins where symbols are ambiguous
    manual_overrides = {
        "BTC": "bitcoin",
        "ETH": "ethereum",
        "SOL": "solana",
        "BNB": "binancecoin",
        "HYPE": "hyperliquid",
        "PUMP": "pump-fun",
        "ASTER": "astar",
        "ZEC": "zcash",
        "SUI": "sui",
        "ACE": "endurance",
        # Add other important ones you watch here
    }
    logging.info("Generating symbol-to-id mapping...")
    for asset in hyperliquid_assets:
        asset_symbol = asset['name'].upper()
        asset_name = asset.get('name', '').upper()  # Use full name if available
        # Priority 1: Manual Overrides
        if asset_symbol in manual_overrides:
            final_mapping[asset_symbol] = manual_overrides[asset_symbol]
            continue
        # Priority 2: Exact Name Match
        if asset_name in cg_name_lookup:
            final_mapping[asset_symbol] = cg_name_lookup[asset_name]
            continue
        # Priority 3: Symbol Match
        if asset_symbol in cg_symbol_lookup:
            final_mapping[asset_symbol] = cg_symbol_lookup[asset_symbol]
        else:
            logging.warning(f"No match found for '{asset_symbol}' on CoinGecko. It will be excluded.")

    # --- 4. Save the mapping to a file ---
    map_file_path = os.path.join("_data", "coin_id_map.json")
    try:
        with open(map_file_path, 'w', encoding='utf-8') as f:
            json.dump(final_mapping, f, indent=4, sort_keys=True)
        logging.info(f"Successfully saved new coin mapping with {len(final_mapping)} entries to '{map_file_path}'.")
    except IOError as e:
        logging.error(f"Failed to write coin mapping file: {e}")


if __name__ == "__main__":
    update_coin_mapping()

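Editor's note: downstream consumers (the MarketCapFetcher change below loads this same file) read the generated map back to translate a Hyperliquid symbol into a CoinGecko id. A minimal sketch of that lookup, assuming the file has been generated:

import json
import os

with open(os.path.join("_data", "coin_id_map.json"), "r") as f:
    coin_id_map = json.load(f)

# Translate a Hyperliquid symbol to a CoinGecko id for an API call
print(coin_id_map.get("BTC"))  # -> "bitcoin" (via the manual override)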
dashboard_data_fetcher.py (new file, 136 lines)

@@ -0,0 +1,136 @@
import logging
import os
import sys
import json
import time
import argparse  # <-- THE FIX: Added this import
from datetime import datetime, timezone
from eth_account import Account
from hyperliquid.info import Info
from hyperliquid.utils import constants
from dotenv import load_dotenv
from logging_utils import setup_logging

# Load .env file
load_dotenv()


class DashboardDataFetcher:
    """
    A dedicated, lightweight process that runs in a loop to fetch and save
    the account's state (balances, positions) for the main dashboard to display.
    """

    def __init__(self, log_level: str):
        setup_logging(log_level, 'DashboardDataFetcher')
        self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
        if not self.vault_address:
            logging.error("MAIN_WALLET_ADDRESS not set in .env file. Cannot proceed.")
            sys.exit(1)
        self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
        self.status_file_path = os.path.join("_logs", "trade_executor_status.json")
        self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
        logging.info(f"Dashboard Data Fetcher initialized for vault: {self.vault_address}")

    def load_managed_positions(self) -> dict:
        """Loads the state of which strategy manages which position."""
        if os.path.exists(self.managed_positions_path):
            try:
                with open(self.managed_positions_path, 'r') as f:
                    data = json.load(f)
                # Create a reverse map: {coin: strategy_name}
                return {v['coin']: k for k, v in data.items()}
            except (IOError, json.JSONDecodeError):
                logging.warning("Could not read managed positions file.")
        return {}

    def fetch_and_save_status(self):
        """Fetches all account data and saves it to the JSON status file."""
        try:
            perpetuals_state = self.info.user_state(self.vault_address)
            spot_state = self.info.spot_user_state(self.vault_address)
            meta, all_market_contexts = self.info.meta_and_asset_ctxs()
            coin_to_strategy_map = self.load_managed_positions()
            status = {
                "last_updated_utc": datetime.now(timezone.utc).isoformat(),
                "perpetuals_account": {"balances": {}, "open_positions": []},
                "spot_account": {"positions": []}
            }
            # 1. Extract Perpetuals Account Data
            margin_summary = perpetuals_state.get("marginSummary", {})
            status["perpetuals_account"]["balances"] = {
                "account_value": margin_summary.get("accountValue"),
                "total_margin_used": margin_summary.get("totalMarginUsed"),
                "withdrawable": margin_summary.get("withdrawable")
            }
            asset_positions = perpetuals_state.get("assetPositions", [])
            for asset_pos in asset_positions:
                pos = asset_pos.get('position', {})
                if float(pos.get('szi', 0)) != 0:
                    coin = pos.get('coin')
                    position_value = float(pos.get('positionValue', 0))
                    margin_used = float(pos.get('marginUsed', 0))
                    leverage = position_value / margin_used if margin_used > 0 else 0
                    position_info = {
                        "coin": coin,
                        "strategy": coin_to_strategy_map.get(coin, "Unmanaged"),
                        "size": pos.get('szi'),
                        "position_value": pos.get('positionValue'),
                        "entry_price": pos.get('entryPx'),
                        "mark_price": pos.get('markPx'),
                        "pnl": pos.get('unrealizedPnl'),
                        "liq_price": pos.get('liquidationPx'),
                        "margin": pos.get('marginUsed'),
                        "funding": pos.get('fundingRate'),
                        "leverage": f"{leverage:.1f}x"
                    }
                    status["perpetuals_account"]["open_positions"].append(position_info)
            # 2. Extract Spot Account Data
            # NOTE: asset contexts carry no names; pair them with meta['universe'] by index
            # (assumed SDK layout) to build a coin -> mark price map
            universe = meta.get("universe", [])
            price_map = {u.get("name"): ctx.get("markPx") for u, ctx in zip(universe, all_market_contexts) if u.get("name")}
            spot_balances = spot_state.get("balances", [])
            for bal in spot_balances:
                total_balance = float(bal.get('total', 0))
                if total_balance > 0:
                    coin = bal.get('coin')
                    mark_price = float(price_map.get(coin, 0) or 0)
                    status["spot_account"]["positions"].append({
                        "coin": coin, "balance_size": total_balance,
                        "position_value": total_balance * mark_price, "pnl": "N/A"
                    })
            # 3. Write to file
            # Use atomic write to prevent partial reads from main_app
            temp_file_path = self.status_file_path + ".tmp"
            with open(temp_file_path, 'w', encoding='utf-8') as f:
                json.dump(status, f, indent=4)
            # Rename is atomic
            os.replace(temp_file_path, self.status_file_path)
            logging.debug("Successfully updated dashboard status file.")
        except Exception as e:
            logging.error(f"Failed to fetch or save account status: {e}")

    def run(self):
        """Main loop to periodically fetch and save data."""
        while True:
            self.fetch_and_save_status()
            time.sleep(5)  # Update dashboard data every 5 seconds


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the Dashboard Data Fetcher.")
    parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
    args = parser.parse_args()
    fetcher = DashboardDataFetcher(log_level=args.log_level)
    try:
        fetcher.run()
    except KeyboardInterrupt:
        logging.info("Dashboard Data Fetcher stopped.")

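Editor's note: the tmp-file-plus-os.replace pattern guarantees a reader never sees a half-written JSON file; the only case a reader must tolerate is the file not existing yet. A minimal reader sketch under that assumption (the function name is illustrative):

import json
import os

STATUS_FILE = os.path.join("_logs", "trade_executor_status.json")

def read_dashboard_status() -> dict:
    """Returns the last complete status snapshot, or {} if none exists yet."""
    try:
        with open(STATUS_FILE, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (FileNotFoundError, json.JSONDecodeError):
        # File not written yet; os.replace ensures we never read a partial write
        return {}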
del_market_cap_tables.py (new file, 56 lines)

@@ -0,0 +1,56 @@
import sqlite3
import logging
import os
from logging_utils import setup_logging


def cleanup_market_cap_tables():
    """
    Scans the database and drops all tables related to market cap data
    to allow for a clean refresh.
    """
    setup_logging('normal', 'DBCleanup')
    db_path = os.path.join("_data", "market_data.db")
    if not os.path.exists(db_path):
        logging.error(f"Database file not found at '{db_path}'. Nothing to clean.")
        return
    logging.info(f"Connecting to database at '{db_path}'...")
    try:
        with sqlite3.connect(db_path) as conn:
            cursor = conn.cursor()
            # Find all tables that were created by the market cap fetcher
            cursor.execute("""
                SELECT name FROM sqlite_master
                WHERE type='table'
                AND (name LIKE '%_market_cap' OR name LIKE 'TOTAL_%')
            """)
            tables_to_drop = cursor.fetchall()
            if not tables_to_drop:
                logging.info("No market cap tables found to clean up. Database is already clean.")
                return
            logging.warning(f"Found {len(tables_to_drop)} market cap tables to remove...")
            for table in tables_to_drop:
                table_name = table[0]
                try:
                    logging.info(f"Dropping table: {table_name}...")
                    conn.execute(f'DROP TABLE IF EXISTS "{table_name}"')
                except Exception as e:
                    logging.error(f"Failed to drop table {table_name}: {e}")
            conn.commit()
            logging.info("--- Database cleanup complete ---")
    except sqlite3.Error as e:
        logging.error(f"A database error occurred: {e}")
    except Exception as e:
        logging.error(f"An unexpected error occurred: {e}")


if __name__ == "__main__":
    cleanup_market_cap_tables()

live_market_utils.py (modified)

@@ -1,49 +1,187 @@
 import logging
 import json
 import time
+import os
+import traceback
+import sys
 from hyperliquid.info import Info
 from hyperliquid.utils import constants
 from logging_utils import setup_logging
+
+# --- Configuration for standalone error logging ---
+LOGS_DIR = "_logs"
+ERROR_LOG_FILE = os.path.join(LOGS_DIR, "live_market_errors.log")
+
+
+def log_error(error_message: str, include_traceback: bool = True):
+    """A simple, robust file logger for any errors."""
+    try:
+        if not os.path.exists(LOGS_DIR):
+            os.makedirs(LOGS_DIR)
+        with open(ERROR_LOG_FILE, 'a') as f:
+            timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
+            f.write(f"--- ERROR at {timestamp} UTC ---\n")
+            f.write(error_message + "\n")
+            if include_traceback:
+                f.write(traceback.format_exc() + "\n")
+            f.write("=" * 50 + "\n")
+    except Exception:
+        print(f"CRITICAL: Failed to write to error log file: {error_message}", file=sys.stderr)
+
+
 def on_message(message, shared_prices_dict):
     """
-    Callback function to process incoming 'allMids' messages and update the
-    shared memory dictionary directly.
+    Callback function to process incoming WebSocket messages for 'bbo' and 'trades'
+    and update the shared memory dictionary.
     """
     try:
-        if message.get("channel") == "allMids":
-            new_prices = message.get("data", {}).get("mids", {})
-            # Update the shared dictionary with the new price data
-            shared_prices_dict.update(new_prices)
+        logging.debug(f"Received WebSocket message: {message}")
+        channel = message.get("channel")
+
+        # --- Parser 1: Handle Best Bid/Offer messages ---
+        if channel == "bbo":
+            data = message.get("data")
+            if not data:
+                logging.warning("BBO message received with no data.")
+                return
+            coin = data.get("coin")
+            if not coin:
+                logging.warning("BBO data received with no coin identifier.")
+                return
+            bid_ask_data = data.get("bbo")
+            if not bid_ask_data or not isinstance(bid_ask_data, list) or len(bid_ask_data) < 2:
+                logging.warning(f"[{coin}] Received BBO message with invalid 'bbo' array: {bid_ask_data}")
+                return
+            try:
+                bid_price_str = bid_ask_data[0].get('px')
+                ask_price_str = bid_ask_data[1].get('px')
+                if not bid_price_str or not ask_price_str:
+                    logging.warning(f"[{coin}] BBO data missing 'px' field.")
+                    return
+                bid_price = float(bid_price_str)
+                ask_price = float(ask_price_str)
+                # Update the shared dictionary for Bid and Ask
+                shared_prices_dict[f"{coin}_bid"] = bid_price
+                shared_prices_dict[f"{coin}_ask"] = ask_price
+                logging.info(f"Updated {coin} (BBO): Bid={bid_price:.4f}, Ask={ask_price:.4f}")
+            except (ValueError, TypeError, IndexError) as e:
+                logging.error(f"[{coin}] Error parsing BBO data: {e}. Data: {bid_ask_data}")
+
+        # --- Parser 2: Handle Live Trade messages ---
+        elif channel == "trades":
+            trade_list = message.get("data")
+            if not trade_list or not isinstance(trade_list, list) or len(trade_list) == 0:
+                logging.warning(f"Received 'trades' message with invalid data: {trade_list}")
+                return
+            # Process all trades in the batch
+            for trade in trade_list:
+                try:
+                    coin = trade.get("coin")
+                    price_str = trade.get("px")
+                    if not coin or not price_str:
+                        logging.warning(f"Trade data missing 'coin' or 'px': {trade}")
+                        continue
+                    price = float(price_str)
+                    # Update the shared dictionary for the "Live Price" column
+                    shared_prices_dict[coin] = price
+                    logging.info(f"Updated {coin} (Live Price) to last trade: {price:.4f}")
+                except (ValueError, TypeError) as e:
+                    logging.error(f"Error parsing trade data: {e}. Data: {trade}")
     except Exception as e:
-        # It's important to log errors inside the process
-        logging.error(f"Error in WebSocket on_message: {e}")
+        log_error(f"Error in WebSocket on_message: {e}")
+
+
-def start_live_feed(shared_prices_dict, log_level='off'):
+def start_live_feed(shared_prices_dict, coins_to_watch: list, log_level='off'):
     """
-    Main function for the WebSocket process. It takes a shared dictionary
-    and continuously feeds it with live market data.
+    Main function for the WebSocket process.
+    Subscribes to BOTH 'bbo' and 'trades' for all watched coins.
     """
-    setup_logging(log_level, 'LiveMarketFeed')
-    # The Info object manages the WebSocket connection.
-    info = Info(constants.MAINNET_API_URL, skip_ws=False)
-    # We need to wrap the callback in a lambda to pass our shared dictionary
+    setup_logging(log_level, 'LiveMarketFeed_Combined')
+    info = None
     callback = lambda msg: on_message(msg, shared_prices_dict)
-    # Subscribe to the allMids channel
-    subscription = {"type": "allMids"}
-    info.subscribe(subscription, callback)
-    logging.info("Subscribed to 'allMids' for live mark prices.")
-    logging.info("Starting live price feed process. Press Ctrl+C in main app to stop.")
+
+    def connect_and_subscribe():
+        """Establishes a new WebSocket connection and subscribes to both streams."""
+        try:
+            logging.info("Connecting to Hyperliquid WebSocket...")
+            new_info = Info(constants.MAINNET_API_URL, skip_ws=False)
+            # --- MODIFIED: Subscribe to 'bbo' AND 'trades' for each coin ---
+            for coin in coins_to_watch:
+                # Subscribe to Best Bid/Offer
+                bbo_sub = {"type": "bbo", "coin": coin}
+                new_info.subscribe(bbo_sub, callback)
+                logging.info(f"Subscribed to 'bbo' for {coin}.")
+                # Subscribe to Live Trades
+                trades_sub = {"type": "trades", "coin": coin}
+                new_info.subscribe(trades_sub, callback)
+                logging.info(f"Subscribed to 'trades' for {coin}.")
+            logging.info("WebSocket connected and all subscriptions sent.")
+            return new_info
+        except Exception as e:
+            log_error(f"Failed to connect to WebSocket: {e}")
+            return None
+
+    info = connect_and_subscribe()
+    if info is None:
+        logging.critical("Initial WebSocket connection failed. Exiting process.")
+        log_error("Initial WebSocket connection failed. Exiting process.", include_traceback=False)
+        time.sleep(10)  # Wait before letting the process manager restart it
+        return
+
+    logging.info("Starting Combined (BBO + Trades) live price feed process.")
     try:
-        # The background thread in the SDK handles messages. This loop just keeps the process alive.
         while True:
-            time.sleep(1)
+            # --- Watchdog Logic ---
+            time.sleep(15)  # Check the connection every 15 seconds
+            if not info.ws_manager.is_alive():
+                error_msg = "WebSocket connection lost. Attempting to reconnect..."
+                logging.warning(error_msg)
+                log_error(error_msg, include_traceback=False)  # Log it to the file
+                try:
+                    info.ws_manager.stop()  # Clean up old manager
+                except Exception as e:
+                    log_error(f"Error stopping old ws_manager: {e}")
+                info = connect_and_subscribe()
+                if info is None:
+                    logging.error("Reconnect failed, will retry in 15s.")
+                else:
+                    logging.info("Successfully reconnected to WebSocket.")
+            else:
+                logging.debug("Watchdog check: WebSocket connection is active.")
     except KeyboardInterrupt:
         logging.info("Stopping WebSocket listener...")
-    info.ws_manager.stop()
-    logging.info("Listener stopped.")
+    except Exception as e:
+        log_error(f"Live Market Feed process crashed: {e}")
+    finally:
+        if info and info.ws_manager:
+            info.ws_manager.stop()
+        logging.info("Combined Listener stopped.")

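Editor's note: start_live_feed is designed to run in its own process against a Manager-backed dict, which is exactly how the main app wires it up below. A standalone sketch of that wiring, with the watch list shortened for illustration:

import multiprocessing
import time

from live_market_utils import start_live_feed

if __name__ == "__main__":
    with multiprocessing.Manager() as manager:
        shared_prices = manager.dict()
        feed = multiprocessing.Process(
            target=start_live_feed,
            args=(shared_prices, ["BTC", "ETH"], 'off'),  # shortened watch list
            daemon=True,
        )
        feed.start()
        time.sleep(5)  # let a few BBO/trade messages arrive
        # keys written by on_message: "BTC" (last trade), "BTC_bid", "BTC_ask"
        print(dict(shared_prices))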
main_app.py (modified)

@@ -9,26 +9,29 @@ import schedule
 import sqlite3
 import pandas as pd
 from datetime import datetime, timezone
+import importlib
+# --- REMOVED: import signal ---
+# --- REMOVED: from queue import Empty ---
 from logging_utils import setup_logging
-# --- Using the high-performance WebSocket utility for live prices ---
+# --- Using the new high-performance WebSocket utility for live prices ---
 from live_market_utils import start_live_feed
+# --- Import the base class for type hinting (optional but good practice) ---
+from strategies.base_strategy import BaseStrategy
 # --- Configuration ---
 WATCHED_COINS = ["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"]
 LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py"
 RESAMPLER_SCRIPT = "resampler.py"
-MARKET_CAP_FETCHER_SCRIPT = "market_cap_fetcher.py"
-TRADE_EXECUTOR_SCRIPT = "trade_executor.py"
+# --- REMOVED: Market Cap Fetcher ---
+# --- REMOVED: trade_executor.py is no longer a script ---
+DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py"
 STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json")
 DB_PATH = os.path.join("_data", "market_data.db")
-MARKET_CAP_SUMMARY_FILE = os.path.join("_data", "market_cap_data.json")
+# --- REMOVED: Market Cap File ---
 LOGS_DIR = "_logs"
 TRADE_EXECUTOR_STATUS_FILE = os.path.join(LOGS_DIR, "trade_executor_status.json")
+# --- ADDED: Standard list of timeframes for the resampler to generate ---
+STANDARD_RESAMPLING_TIMEFRAMES = ["3m", "5m", "15m", "30m", "37m", "148m", "1h", "2h", "4h", "8h", "12h", "1d", "3d", "1w", "1M"]

 def format_market_cap(mc_value):
     """Formats a large number into a human-readable market cap string."""
@@ -45,19 +48,56 @@ def format_market_cap(mc_value):
 def run_live_candle_fetcher():
     """Target function to run the live_candle_fetcher.py script in a resilient loop."""
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    import signal
+    shutdown_requested = False
+
+    def handle_shutdown_signal(signum, frame):
+        nonlocal shutdown_requested
+        # Use print here as logging may not be set up
+        print(f"[CandleFetcher] Shutdown signal ({signum}) received. Will stop after current run.")
+        shutdown_requested = True
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    signal.signal(signal.SIGINT, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
     log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log")
-    while True:
+    while not shutdown_requested:  # <-- MODIFIED
+        process = None
         try:
             with open(log_file, 'a') as f:
                 command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
                 f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n")
-                subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
+                # Use Popen instead of run to be non-blocking
+                process = subprocess.Popen(command, stdout=f, stderr=subprocess.STDOUT)
+                # Poll the process and check for shutdown request
+                while process.poll() is None and not shutdown_requested:
+                    time.sleep(0.5)  # Poll every 500ms
+                if shutdown_requested and process.poll() is None:
+                    print(f"[CandleFetcher] Terminating subprocess {LIVE_CANDLE_FETCHER_SCRIPT}...")
+                    process.terminate()  # Terminate the child script
+                    process.wait()  # Wait for it to exit
+                    print("[CandleFetcher] Subprocess terminated.")
         except (subprocess.CalledProcessError, Exception) as e:
+            if shutdown_requested:
+                break  # Don't restart if we're shutting down
             with open(log_file, 'a') as f:
                 f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
                 f.write(f"Live candle fetcher failed: {e}. Restarting...\n")
             time.sleep(5)
+        if shutdown_requested:
+            break  # Exit outer loop
+    print("[CandleFetcher] Live candle fetcher shutting down.")

 def run_resampler_job(timeframes_to_generate: list):
     """Defines the job for the resampler, redirecting output to a log file."""
@@ -73,70 +113,233 @@ def run_resampler_job(timeframes_to_generate: list):
             f.write(f"Failed to run resampler.py job: {e}\n")

-def resampler_scheduler():
-    """Schedules the resampler.py script to run at the start of every minute."""
+def resampler_scheduler(timeframes_to_generate: list):
+    """Schedules the resampler.py script."""
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    import signal
+    shutdown_requested = False
+
+    def handle_shutdown_signal(signum, frame):
+        nonlocal shutdown_requested
+        try:
+            logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
+        except NameError:
+            print(f"[ResamplerScheduler] Shutdown signal ({signum}) received. Exiting loop...")
+        shutdown_requested = True
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    signal.signal(signal.SIGINT, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
     setup_logging('off', 'ResamplerScheduler')
     # Run once at startup
-    run_resampler_job(STANDARD_RESAMPLING_TIMEFRAMES)
+    run_resampler_job(timeframes_to_generate)
     # Schedule to run every minute at the :01 second mark
-    schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=STANDARD_RESAMPLING_TIMEFRAMES)
+    schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate)
     logging.info("Resampler scheduled to run every minute at :01.")
-    while True:
+    while not shutdown_requested:  # <-- MODIFIED
         schedule.run_pending()
-        time.sleep(1)  # Check every second to not miss the scheduled time
+        time.sleep(0.5)  # Check every 500ms to not miss the scheduled time and be responsive
+    logging.info("ResamplerScheduler shutting down.")

-def run_market_cap_fetcher_job():
-    """Defines the job for the market cap fetcher, redirecting output."""
-    log_file = os.path.join(LOGS_DIR, "market_cap_fetcher.log")
-    try:
-        command = [sys.executable, MARKET_CAP_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
-        with open(log_file, 'a') as f:
-            f.write(f"\n--- Starting {MARKET_CAP_FETCHER_SCRIPT} job at {datetime.now()} ---\n")
-            subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
-    except Exception as e:
-        with open(log_file, 'a') as f:
-            f.write(f"\n--- SCHEDULER ERROR at {datetime.now()} ---\n")
-            f.write(f"Failed to run {MARKET_CAP_FETCHER_SCRIPT} job: {e}\n")
-
-def market_cap_fetcher_scheduler():
-    """Schedules the market_cap_fetcher.py script to run daily at a specific UTC time."""
-    setup_logging('off', 'MarketCapScheduler')
-    schedule.every().day.at("00:15", "UTC").do(run_market_cap_fetcher_job)
-    while True:
-        schedule.run_pending()
-        time.sleep(60)
+# --- REMOVED: run_market_cap_fetcher_job function ---
+# --- REMOVED: market_cap_fetcher_scheduler function ---

-def run_strategy(strategy_name: str, config: dict):
-    """Target function to run a strategy, redirecting its output to a log file."""
-    log_file = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
-    script_name = config['script']
-    command = [sys.executable, script_name, "--name", strategy_name, "--log-level", "normal"]
-    while True:
-        try:
-            with open(log_file, 'a') as f:
-                f.write(f"\n--- Starting strategy '{strategy_name}' at {datetime.now()} ---\n")
-                subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
-        except (subprocess.CalledProcessError, Exception) as e:
-            with open(log_file, 'a') as f:
-                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
-                f.write(f"Strategy '{strategy_name}' failed: {e}. Restarting...\n")
-            time.sleep(10)
-
-def run_trade_executor():
-    """Target function to run the trade_executor.py script in a resilient loop."""
-    log_file = os.path.join(LOGS_DIR, "trade_executor.log")
-    while True:
-        try:
-            with open(log_file, 'a') as f:
-                f.write(f"\n--- Starting Trade Executor at {datetime.now()} ---\n")
-                subprocess.run([sys.executable, TRADE_EXECUTOR_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
-        except (subprocess.CalledProcessError, Exception) as e:
-            with open(log_file, 'a') as f:
-                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
-                f.write(f"Trade Executor failed: {e}. Restarting...\n")
-            time.sleep(10)
+def run_trade_executor(order_execution_queue: multiprocessing.Queue):
+    """
+    Target function to run the TradeExecutor class in a resilient loop.
+    It now consumes from the order_execution_queue.
+    """
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    import signal
+
+    def handle_shutdown_signal(signum, frame):
+        # We can just raise KeyboardInterrupt, as it's handled below
+        logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
+        raise KeyboardInterrupt
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
+    log_file_path = os.path.join(LOGS_DIR, "trade_executor.log")
+    try:
+        sys.stdout = open(log_file_path, 'a', buffering=1)
+        sys.stderr = sys.stdout
+    except Exception as e:
+        print(f"Failed to open log file for TradeExecutor: {e}")
+    setup_logging('normal', "TradeExecutor")
+    logging.info("\n--- Starting Trade Executor process ---")
+    while True:
+        try:
+            from trade_executor import TradeExecutor
+            executor = TradeExecutor(log_level="normal", order_execution_queue=order_execution_queue)
+            # --- REVERTED: Call executor.run() directly ---
+            executor.run()
+        except KeyboardInterrupt:
+            logging.info("Trade Executor interrupted. Exiting.")
+            return
+        except Exception as e:
+            logging.error(f"Trade Executor failed: {e}. Restarting...\n", exc_info=True)
+            time.sleep(10)
+
+def run_position_manager(trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
+    """
+    Target function to run the PositionManager class in a resilient loop.
+    Consumes from trade_signal_queue, produces for order_execution_queue.
+    """
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    import signal
+
+    def handle_shutdown_signal(signum, frame):
+        # Raise KeyboardInterrupt, as it's handled by the loop
+        logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
+        raise KeyboardInterrupt
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
+    log_file_path = os.path.join(LOGS_DIR, "position_manager.log")
+    try:
+        sys.stdout = open(log_file_path, 'a', buffering=1)
+        sys.stderr = sys.stdout
+    except Exception as e:
+        print(f"Failed to open log file for PositionManager: {e}")
+    setup_logging('normal', "PositionManager")
+    logging.info("\n--- Starting Position Manager process ---")
+    while True:
+        try:
+            from position_manager import PositionManager
+            manager = PositionManager(
+                log_level="normal",
+                trade_signal_queue=trade_signal_queue,
+                order_execution_queue=order_execution_queue
+            )
+            # --- REVERTED: Call manager.run() directly ---
+            manager.run()
+        except KeyboardInterrupt:
+            logging.info("Position Manager interrupted. Exiting.")
+            return
+        except Exception as e:
+            logging.error(f"Position Manager failed: {e}. Restarting...\n", exc_info=True)
+            time.sleep(10)
+
+def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue):
+    """
+    This function BECOMES the strategy runner. It is executed as a separate
+    process and pushes signals to the shared queue.
+    """
+    # These imports only happen in the new, lightweight process
+    import importlib
+    import os
+    import sys
+    import time
+    import logging
+    import signal  # <-- ADDED
+    from logging_utils import setup_logging
+    from strategies.base_strategy import BaseStrategy
+
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    def handle_shutdown_signal(signum, frame):
+        # Raise KeyboardInterrupt, as it's handled by the loop
+        try:
+            logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
+        except NameError:
+            print(f"[Strategy-{strategy_name}] Shutdown signal ({signum}) received. Initiating graceful exit...")
+        raise KeyboardInterrupt
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
+    # --- Setup logging to file for this specific process ---
+    log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
+    try:
+        sys.stdout = open(log_file_path, 'a', buffering=1)  # 1 = line buffering
+        sys.stderr = sys.stdout
+    except Exception as e:
+        print(f"Failed to open log file for {strategy_name}: {e}")
+    setup_logging('normal', f"Strategy-{strategy_name}")
+    while True:
+        try:
+            logging.info(f"--- Starting strategy '{strategy_name}' ---")
+            if 'class' not in config:
+                logging.error(f"Strategy config for '{strategy_name}' is missing the 'class' key. Exiting.")
+                return
+            module_path, class_name = config['class'].rsplit('.', 1)
+            module = importlib.import_module(module_path)
+            StrategyClass = getattr(module, class_name)
+            strategy = StrategyClass(strategy_name, config['parameters'], trade_signal_queue)
+            if config.get("is_event_driven", False):
+                logging.info("Starting EVENT-DRIVEN logic loop...")
+                strategy.run_event_loop()  # This is a blocking call
+            else:
+                logging.info("Starting POLLING logic loop...")
+                strategy.run_polling_loop()  # This is the original blocking call
+        # --- REVERTED: Added back simple KeyboardInterrupt handler ---
+        except KeyboardInterrupt:
+            logging.info(f"Strategy {strategy_name} process stopping.")
+            return
+        except Exception as e:
+            # --- REVERTED: Removed specific check for KeyboardInterrupt ---
+            logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True)
+            logging.info("Restarting strategy in 10 seconds...")
+            time.sleep(10)
+
+def run_dashboard_data_fetcher():
+    """Target function to run the dashboard_data_fetcher.py script."""
+    # --- GRACEFUL SHUTDOWN HANDLER ---
+    import signal
+
+    def handle_shutdown_signal(signum, frame):
+        # Raise KeyboardInterrupt, as it's handled by the loop
+        try:
+            logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
+        except NameError:
+            print(f"[DashboardDataFetcher] Shutdown signal ({signum}) received. Initiating graceful exit...")
+        raise KeyboardInterrupt
+
+    signal.signal(signal.SIGTERM, handle_shutdown_signal)
+    # --- END GRACEFUL SHUTDOWN HANDLER ---
+    log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log")
+    while True:
+        try:
+            with open(log_file, 'a') as f:
+                f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n")
+                subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
+        # --- MODIFIED: Added to catch interrupt ---
+        except KeyboardInterrupt:
+            logging.info("Dashboard Data Fetcher stopping.")
+            break
+        except (subprocess.CalledProcessError, Exception) as e:
+            with open(log_file, 'a') as f:
+                f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
+                f.write(f"Dashboard Data Fetcher failed: {e}. Restarting...\n")
+            time.sleep(10)
@@ -145,7 +348,7 @@ class MainApp:
         self.watched_coins = coins_to_watch
         self.shared_prices = shared_prices
         self.prices = {}
-        self.market_caps = {}
+        # --- REMOVED: self.market_caps ---
         self.open_positions = {}
         self.background_processes = processes
         self.process_status = {}
@@ -155,23 +358,15 @@ class MainApp:
     def read_prices(self):
         """Reads the latest prices directly from the shared memory dictionary."""
         try:
-            self.prices = dict(self.shared_prices)
+            # --- FIX: Use .copy() for thread-safe iteration ---
+            self.prices = self.shared_prices.copy()
         except Exception as e:
             logging.debug(f"Could not read from shared prices dict: {e}")

-    def read_market_caps(self):
-        if os.path.exists(MARKET_CAP_SUMMARY_FILE):
-            try:
-                with open(MARKET_CAP_SUMMARY_FILE, 'r', encoding='utf-8') as f:
-                    summary_data = json.load(f)
-                for coin in self.watched_coins:
-                    table_key = f"{coin}_market_cap"
-                    if table_key in summary_data:
-                        self.market_caps[coin] = summary_data[table_key].get('market_cap')
-            except (json.JSONDecodeError, IOError):
-                logging.debug("Could not read market cap summary file.")
+    # --- REMOVED: read_market_caps method ---

     def read_strategy_statuses(self):
+        """Reads the status JSON file for each enabled strategy."""
         enabled_statuses = {}
         for name, config in self.strategy_configs.items():
             if config.get("enabled", False):
@@ -187,38 +382,84 @@ class MainApp:
         self.strategy_statuses = enabled_statuses

     def read_executor_status(self):
+        """Reads the live status file from the trade executor."""
         if os.path.exists(TRADE_EXECUTOR_STATUS_FILE):
             try:
                 with open(TRADE_EXECUTOR_STATUS_FILE, 'r', encoding='utf-8') as f:
-                    self.open_positions = json.load(f)
+                    # --- FIX: Read the 'open_positions' key from the file ---
+                    status_data = json.load(f)
+                self.open_positions = status_data.get('open_positions', {})
             except (IOError, json.JSONDecodeError):
                 logging.debug("Could not read trade executor status file.")
         else:
             self.open_positions = {}

     def check_process_status(self):
+        """Checks if the background processes are still running."""
         for name, process in self.background_processes.items():
             self.process_status[name] = "Running" if process.is_alive() else "STOPPED"

+    def _format_price(self, price_val, width=10):
+        """Helper function to format prices for the dashboard."""
+        try:
+            price_float = float(price_val)
+            if price_float < 1:
+                price_str = f"{price_float:>{width}.6f}"
+            elif price_float < 100:
+                price_str = f"{price_float:>{width}.4f}"
+            else:
+                price_str = f"{price_float:>{width}.2f}"
+        except (ValueError, TypeError):
+            price_str = f"{'Loading...':>{width}}"
+        return price_str
+
     def display_dashboard(self):
-        print("\x1b[H\x1b[J", end="")
+        """Displays a formatted dashboard with side-by-side tables."""
+        print("\x1b[H\x1b[J", end="")  # Clear screen
         left_table_lines = ["--- Market Dashboard ---"]
-        left_table_width = 44
+        # --- MODIFIED: Adjusted width for new columns ---
+        left_table_width = 65
         left_table_lines.append("-" * left_table_width)
-        left_table_lines.append(f"{'#':<2} | {'Coin':^6} | {'Live Price':>10} | {'Market Cap':>15} |")
+        # --- MODIFIED: Replaced Market Cap with Gap ---
+        left_table_lines.append(f"{'#':<2} | {'Coin':^6} | {'Best Bid':>10} | {'Live Price':>10} | {'Best Ask':>10} | {'Gap':>10} |")
         left_table_lines.append("-" * left_table_width)
         for i, coin in enumerate(self.watched_coins, 1):
-            price = self.prices.get(coin, "Loading...")
-            market_cap = self.market_caps.get(coin)
-            formatted_mc = format_market_cap(market_cap)
-            left_table_lines.append(f"{i:<2} | {coin:^6} | {price:>10} | {formatted_mc:>15} |")
+            # --- MODIFIED: Fetch all three price types ---
+            mid_price = self.prices.get(coin, "Loading...")
+            bid_price = self.prices.get(f"{coin}_bid", "Loading...")
+            ask_price = self.prices.get(f"{coin}_ask", "Loading...")
+            # --- MODIFIED: Use the new formatting helper ---
+            formatted_mid = self._format_price(mid_price)
+            formatted_bid = self._format_price(bid_price)
+            formatted_ask = self._format_price(ask_price)
+            # --- MODIFIED: Calculate gap (the bid/ask spread) ---
+            gap_str = f"{'Loading...':>10}"
+            try:
+                gap_val = float(ask_price) - float(bid_price)
+                # Format gap with high precision, similar to price
+                if gap_val < 1:
+                    gap_str = f"{gap_val:>10.6f}"
+                else:
+                    gap_str = f"{gap_val:>10.4f}"
+            except (ValueError, TypeError):
+                pass  # Keep 'Loading...'
+            # --- MODIFIED: Print all price columns including gap ---
+            left_table_lines.append(f"{i:<2} | {coin:^6} | {formatted_bid} | {formatted_mid} | {formatted_ask} | {gap_str} |")
         left_table_lines.append("-" * left_table_width)

         right_table_lines = ["--- Strategy Status ---"]
-        right_table_width = 154
+        # --- FIX: Adjusted table width after removing parameters ---
+        right_table_width = 105
         right_table_lines.append("-" * right_table_width)
-        right_table_lines.append(f"{'#':^2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':^8} | {'Signal Price':>12} | {'Last Change':>17} | {'TF':^5} | {'Size':^8} | {'Parameters':<45} |")
+        # --- FIX: Removed 'Parameters' from header ---
+        right_table_lines.append(f"{'#':^2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':^8} | {'Signal Price':>12} | {'Last Change':>17} | {'TF':^5} | {'Size':^8} |")
         right_table_lines.append("-" * right_table_width)
         for i, (name, status) in enumerate(self.strategy_statuses.items(), 1):
             signal = status.get('current_signal', 'N/A')
@@ -232,13 +473,37 @@ class MainApp:
             last_change_display = dt_local.strftime('%Y-%m-%d %H:%M')
             config_params = self.strategy_configs.get(name, {}).get('parameters', {})
-            coin = config_params.get('coin', 'N/A')
-            timeframe = config_params.get('timeframe', 'N/A')
-            size = config_params.get('size', 'N/A')
-            other_params = {k: v for k, v in config_params.items() if k not in ['coin', 'timeframe', 'size']}
-            params_str = ", ".join([f"{k}={v}" for k, v in other_params.items()])
-            right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size:>8} | {params_str:<45} |")
+            # --- FIX: Read coin/size from status file first, fallback to config ---
+            coin = status.get('coin', config_params.get('coin', 'N/A'))
+            # --- FIX: Handle nested 'coins_to_copy' logic for size ---
+            # --- MODIFIED: Read 'size' from status first, then config, then 'Multi' ---
+            size = status.get('size')
+            if not size:
+                if 'coins_to_copy' in config_params:
+                    size = 'Multi'
+                else:
+                    size = config_params.get('size', 'N/A')
+            timeframe = config_params.get('timeframe', 'N/A')
+            # --- FIX: Removed the parameter string logic and 'params_str' column ---
+            size_display = f"{size:>8}"
+            if isinstance(size, (int, float)):
+                # --- MODIFIED: More flexible size formatting ---
+                if size < 0.0001:
+                    size_display = f"{size:>8.6f}"
+                elif size < 1:
+                    size_display = f"{size:>8.4f}"
+                else:
+                    size_display = f"{size:>8.2f}"
+            # --- END NEW LOGIC ---
+            right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size_display} |")
         right_table_lines.append("-" * right_table_width)

         output_lines = []
@@ -256,38 +521,33 @@ class MainApp:
         output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
         output_lines.append("-" * pos_table_width)

-        perps_positions = self.open_positions.get('perpetuals_account', {}).get('open_positions', [])
-        spot_positions = self.open_positions.get('spot_account', {}).get('positions', [])
-        if not perps_positions and not spot_positions:
-            output_lines.append("No open positions found.")
+        # --- FIX: Correctly read and display open positions ---
+        if not self.open_positions:
+            output_lines.append(f"{'No open positions.':^{pos_table_width}}")
         else:
-            for pos in perps_positions:
-                try:
-                    pnl = float(pos.get('pnl', 0.0))
-                    pnl_str = f"${pnl:,.2f}"
-                except (ValueError, TypeError):
-                    pnl_str = "Error"
-                coin = pos.get('coin') or '-'
-                size = pos.get('size') or '-'
-                entry_price = pos.get('entry_price') or '-'
-                mark_price = pos.get('mark_price') or '-'
-                leverage = pos.get('leverage') or '-'
-                output_lines.append(f"{'Perps':<10} | {coin:<6} | {size:>15} | {entry_price:>12} | {mark_price:>12} | {pnl_str:>15} | {leverage:>10} |")
-            for pos in spot_positions:
-                pnl = pos.get('pnl', 'N/A')
-                coin = pos.get('coin') or '-'
-                balance_size = pos.get('balance_size') or '-'
-                output_lines.append(f"{'Spot':<10} | {coin:<6} | {balance_size:>15} | {'-':>12} | {'-':>12} | {pnl:>15} | {'-':>10} |")
+            for account, positions in self.open_positions.items():
+                if not positions:
+                    continue
+                for coin, pos in positions.items():
+                    try:
+                        size_f = float(pos.get('size', 0))
+                        entry_f = float(pos.get('entry_price', 0))
+                        mark_f = float(self.prices.get(coin, 0))
+                        pnl_f = (mark_f - entry_f) * size_f if size_f > 0 else (entry_f - mark_f) * abs(size_f)
+                        lev = pos.get('leverage', 1)
+                        size_str = f"{size_f:>15.5f}"
+                        entry_str = f"{entry_f:>12.2f}"
+                        mark_str = f"{mark_f:>12.2f}"
+                        pnl_str = f"{pnl_f:>15.2f}"
+                        lev_str = f"{lev}x"
+                        output_lines.append(f"{account:<10} | {coin:<6} | {size_str} | {entry_str} | {mark_str} | {pnl_str} | {lev_str:>10} |")
+                    except (ValueError, TypeError):
+                        output_lines.append(f"{account:<10} | {coin:<6} | {'Error parsing data...':^{pos_table_width - 20}} |")
         output_lines.append("-" * pos_table_width)
-        output_lines.append("\n--- Background Processes ---")
-        for name, status in self.process_status.items():
-            output_lines.append(f"{name:<25}: {status}")

         final_output = "\n".join(output_lines)
         print(final_output)
         sys.stdout.flush()
@@ -296,10 +556,10 @@ class MainApp:
         """Main loop to read data, display dashboard, and check processes."""
         while True:
             self.read_prices()
-            self.read_market_caps()
+            # --- REMOVED: self.read_market_caps() ---
             self.read_strategy_statuses()
             self.read_executor_status()
-            self.check_process_status()
+            # --- REMOVED: self.check_process_status() ---
             self.display_dashboard()
             time.sleep(0.5)
@@ -310,7 +570,7 @@ if __name__ == "__main__":
         os.makedirs(LOGS_DIR)

     processes = {}
-    strategy_configs = {}
+    # --- REVERTED: Removed process groups ---

     try:
         with open(STRATEGY_CONFIG_FILE, 'r') as f:
@@ -319,23 +579,53 @@ if __name__ == "__main__":
         logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
         sys.exit(1)

+    # --- FIX: Hardcoded timeframes ---
+    required_timeframes = [
+        "3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h",
+        "12h", "1d", "3d", "1w", "1M", "148m", "37m"
+    ]
+    logging.info(f"Using fixed timeframes for resampler: {required_timeframes}")
+
     with multiprocessing.Manager() as manager:
         shared_prices = manager.dict()
+        # --- FIX: Create TWO queues ---
+        trade_signal_queue = manager.Queue()
+        order_execution_queue = manager.Queue()

-        processes["Live Market Feed"] = multiprocessing.Process(target=start_live_feed, args=(shared_prices, 'off'), daemon=True)
+        # --- REVERTED: All processes are daemon=True and in one dict ---
+        # --- FIX: Pass WATCHED_COINS to the start_live_feed process ---
+        # --- MODIFICATION: Set log level back to 'off' ---
+        processes["Live Market Feed"] = multiprocessing.Process(
+            target=start_live_feed,
+            args=(shared_prices, WATCHED_COINS, 'off'),
+            daemon=True
+        )
         processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True)
-        # --- FIX: The resampler now uses a fixed list of TFs and a new schedule ---
-        processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, daemon=True)
-        processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)
-        processes["Trade Executor"] = multiprocessing.Process(target=run_trade_executor, daemon=True)
+        processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(list(required_timeframes),), daemon=True)
+        # --- REMOVED: Market Cap Fetcher Process ---
+        processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True)
+        processes["Position Manager"] = multiprocessing.Process(
+            target=run_position_manager,
+            args=(trade_signal_queue, order_execution_queue),
+            daemon=True
+        )
+        processes["Trade Executor"] = multiprocessing.Process(
+            target=run_trade_executor,
+            args=(order_execution_queue,),
+            daemon=True
+        )

         for name, config in strategy_configs.items():
             if config.get("enabled", False):
-                if not os.path.exists(config['script']):
-                    logging.error(f"Strategy script '{config['script']}' for '{name}' not found. Skipping.")
+                if 'class' not in config:
+                    logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.")
                     continue
-                proc = multiprocessing.Process(target=run_strategy, args=(name, config), daemon=True)
-                processes[f"Strategy: {name}"] = proc
+                proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True)
+                processes[f"Strategy: {name}"] = proc  # Add to strategy group
+        # --- REVERTED: Removed combined dict ---

         for name, proc in processes.items():
             logging.info(f"Starting process '{name}'...")
@@ -347,11 +637,49 @@ if __name__ == "__main__":
         try:
             app.run()
         except KeyboardInterrupt:
+            # --- MODIFIED: Staged shutdown ---
             logging.info("Shutting down...")
-            for proc in processes.values():
-                if proc.is_alive(): proc.terminate()
-            for proc in processes.values():
-                if proc.is_alive(): proc.join()
+
+            strategy_procs = {}
+            other_procs = {}
+            for name, proc in processes.items():
+                if name.startswith("Strategy:"):
+                    strategy_procs[name] = proc
+                else:
+                    other_procs[name] = proc
+
+            # --- 1. Terminate strategy processes ---
+            logging.info("Shutting down strategy processes first...")
+            for name, proc in strategy_procs.items():
+                if proc.is_alive():
+                    logging.info(f"Terminating process: '{name}'...")
+                    proc.terminate()
+
+            # --- 2. Wait for 5 seconds ---
+            logging.info("Waiting 5 seconds for strategies to close...")
+            time.sleep(5)
+
+            # --- 3. Terminate all other processes ---
+            logging.info("Shutting down remaining core processes...")
+            for name, proc in other_procs.items():
+                if proc.is_alive():
+                    logging.info(f"Terminating process: '{name}'...")
+                    proc.terminate()
+
+            # --- 4. Join all processes (strategies and others) ---
+            logging.info("Waiting for all processes to join...")
+            for name, proc in processes.items():  # Iterate over the original dict to get all
+                if proc.is_alive():
+                    logging.info(f"Waiting for process '{name}' to join...")
+                    proc.join(timeout=5)  # Wait up to 5 seconds
+                    if proc.is_alive():
+                        # If it's still alive, force kill
+                        logging.warning(f"Process '{name}' did not terminate, forcing kill.")
+                        proc.kill()
+            # --- END MODIFIED ---
             logging.info("Shutdown complete.")
             sys.exit(0)

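Editor's note: run_strategy expects each entry in _data/strategies.json to carry an 'enabled' flag, a dotted 'class' path for importlib, a 'parameters' dict, and optionally 'is_event_driven'. A hypothetical entry consistent with that code (the strategy name, class path, and parameter values are illustrative):

{
    "sma_cross_btc_1h": {
        "enabled": true,
        "class": "strategies.sma_cross_strategy.SmaCrossStrategy",
        "is_event_driven": false,
        "parameters": {
            "coin": "BTC",
            "timeframe": "1h",
            "size": 0.01,
            "fast_period": 10,
            "slow_period": 50,
            "agent": "agent_1"
        }
    }
}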
market_cap_fetcher.py (modified)

@ -8,48 +8,107 @@ import requests
import time import time
from datetime import datetime, timezone, timedelta from datetime import datetime, timezone, timedelta
import json import json
from dotenv import load_dotenv
load_dotenv()
# Assuming logging_utils.py is in the same directory
from logging_utils import setup_logging
class MarketCapFetcher:
"""
Fetches historical daily market cap data from the CoinGecko API and
intelligently upserts it into the SQLite database for all coins.
"""
def __init__(self, log_level: str):
"BTC": "bitcoin",
"ETH": "ethereum",
"SOL": "solana",
"BNB": "binancecoin",
"HYPE": "hyperliquid",
"ASTER": "astar",
"ZEC": "zcash",
"PUMP": "pump-fun", # Correct ID is 'pump-fun'
"SUI": "sui"
}
STABLECOIN_ID_MAP = {
"USDT": "tether",
"USDC": "usd-coin",
"USDE": "ethena-usde",
"DAI": "dai",
"PYUSD": "paypal-usd"
}
def __init__(self, log_level: str, coins: list):
setup_logging(log_level, 'MarketCapFetcher') setup_logging(log_level, 'MarketCapFetcher')
self.coins_to_fetch = coins
self.db_path = os.path.join("_data", "market_data.db") self.db_path = os.path.join("_data", "market_data.db")
self.api_base_url = "https://api.coingecko.com/api/v3" self.api_base_url = "https://api.coingecko.com/api/v3"
self.api_key = os.environ.get("COINGECKO_API_KEY") self.api_key = os.environ.get("COINGECKO_API_KEY")
if not self.api_key: if not self.api_key:
logging.error("CoinGecko API key not found. Please set the COINGECKO_API_KEY environment variable.") logging.error("CoinGecko API key not found. Please set the COINGECKO_API_KEY environment variable.")
sys.exit(1) sys.exit(1)
self.COIN_ID_MAP = self._load_coin_id_map()
if not self.COIN_ID_MAP:
logging.error("Coin ID map is empty. Run 'update_coin_map.py' to generate it.")
sys.exit(1)
self.coins_to_fetch = list(self.COIN_ID_MAP.keys())
self.STABLECOIN_ID_MAP = {
"USDT": "tether", "USDC": "usd-coin", "USDE": "ethena-usde",
"DAI": "dai", "PYUSD": "paypal-usd"
}
self._ensure_tables_exist()
def _ensure_tables_exist(self):
"""Ensures all market cap tables exist with timestamp_ms as PRIMARY KEY."""
all_tables_to_check = [f"{coin}_market_cap" for coin in self.coins_to_fetch]
all_tables_to_check.extend(["STABLECOINS_market_cap", "TOTAL_market_cap_daily"])
with sqlite3.connect(self.db_path) as conn:
for table_name in all_tables_to_check:
cursor = conn.cursor()
cursor.execute(f"PRAGMA table_info('{table_name}')")
columns = cursor.fetchall()
if columns:
pk_found = any(col[1] == 'timestamp_ms' and col[5] == 1 for col in columns)
if not pk_found:
logging.warning(f"Schema for table '{table_name}' is incorrect. Dropping and recreating table.")
try:
conn.execute(f'DROP TABLE "{table_name}"')
self._create_market_cap_table(conn, table_name)
logging.info(f"Successfully recreated schema for '{table_name}'.")
except Exception as e:
logging.error(f"FATAL: Failed to recreate table '{table_name}': {e}. Please delete 'market_data.db' and restart.")
sys.exit(1)
else:
self._create_market_cap_table(conn, table_name)
logging.info("All market cap table schemas verified.")
def _create_market_cap_table(self, conn, table_name):
"""Creates a new market cap table with the correct schema."""
conn.execute(f'''
CREATE TABLE IF NOT EXISTS "{table_name}" (
datetime_utc TEXT,
timestamp_ms INTEGER PRIMARY KEY,
market_cap REAL
)
''')
def _load_coin_id_map(self) -> dict:
"""Loads the dynamically generated coin-to-id mapping."""
map_file_path = os.path.join("_data", "coin_id_map.json")
try:
with open(map_file_path, 'r') as f:
return json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
logging.error(f"Could not load '{map_file_path}'. Please run 'update_coin_map.py' first. Error: {e}")
return {}
def _upsert_market_cap_data(self, conn, table_name: str, df: pd.DataFrame):
"""Upserts a DataFrame of market cap data into the specified table."""
if df.empty:
return
records_to_upsert = []
for index, row in df.iterrows():
records_to_upsert.append((
row['datetime_utc'].strftime('%Y-%m-%d %H:%M:%S'),
row['timestamp_ms'],
row['market_cap']
))
cursor = conn.cursor()
cursor.executemany(f'''
INSERT OR REPLACE INTO "{table_name}" (datetime_utc, timestamp_ms, market_cap)
VALUES (?, ?, ?)
''', records_to_upsert)
conn.commit()
logging.info(f"Successfully upserted {len(records_to_upsert)} records into '{table_name}'.")
def run(self):
"""
Main execution function to process all configured coins and update the database.
@ -58,7 +117,6 @@ class MarketCapFetcher:
with sqlite3.connect(self.db_path) as conn:
conn.execute("PRAGMA journal_mode=WAL;")
for coin_symbol in self.coins_to_fetch:
coin_id = self.COIN_ID_MAP.get(coin_symbol.upper())
if not coin_id:
@ -71,30 +129,21 @@ class MarketCapFetcher:
logging.error(f"An unexpected error occurred while processing {coin_symbol}: {e}") logging.error(f"An unexpected error occurred while processing {coin_symbol}: {e}")
time.sleep(2) time.sleep(2)
# 2. Process and aggregate stablecoins
self._update_stablecoin_aggregate(conn) self._update_stablecoin_aggregate(conn)
# 3. Process total market cap metrics
self._update_total_market_cap(conn) self._update_total_market_cap(conn)
# 4. Save a summary of the latest data
self._save_summary(conn) self._save_summary(conn)
logging.info("--- Market cap fetch process complete ---") logging.info("--- Market cap fetch process complete ---")
def _save_summary(self, conn):
# ... (This function is unchanged)
logging.info("--- Generating Market Cap Summary ---")
summary_data = {}
summary_file_path = os.path.join("_data", "market_cap_data.json")
try:
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE '%_market_cap' OR name LIKE 'TOTAL_%');")
tables = [row[0] for row in cursor.fetchall()]
for table_name in tables:
try:
df_last = pd.read_sql(f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT 1', conn)
@ -102,40 +151,24 @@ class MarketCapFetcher:
summary_data[table_name] = df_last.to_dict('records')[0]
except Exception as e:
logging.error(f"Could not read last record from table '{table_name}': {e}")
if summary_data:
summary_data['summary_last_updated_utc'] = datetime.now(timezone.utc).isoformat()
with open(summary_file_path, 'w', encoding='utf-8') as f:
json.dump(summary_data, f, indent=4)
logging.info(f"Successfully saved market cap summary to '{summary_file_path}'")
else:
logging.warning("No data found to create a summary.")
except Exception as e:
logging.error(f"Failed to generate summary: {e}")
def _update_total_market_cap(self, conn):
"""Fetches the current total market cap and upserts it for the current date."""
logging.info("--- Processing Total Market Cap ---")
table_name = "TOTAL_market_cap_daily"
try:
# --- FIX: Use the current date instead of yesterday's ---
today_date = datetime.now(timezone.utc).date()
today_dt = pd.to_datetime(today_date)
today_ts = int(today_dt.timestamp() * 1000)
logging.info("Fetching current global market data...")
url = f"{self.api_base_url}/global"
@ -147,10 +180,11 @@ class MarketCapFetcher:
if total_mc:
df_total = pd.DataFrame([{
'datetime_utc': today_dt,
'timestamp_ms': today_ts,
'market_cap': total_mc
}])
self._upsert_market_cap_data(conn, table_name, df_total)
logging.info(f"Saved total market cap for {today_date}: ${total_mc:,.2f}")
except requests.exceptions.RequestException as e:
@ -158,7 +192,6 @@ class MarketCapFetcher:
except Exception as e:
logging.error(f"An error occurred while updating total market cap: {e}")
def _update_stablecoin_aggregate(self, conn):
"""Fetches data for all stablecoins and saves the aggregated market cap."""
logging.info("--- Processing aggregated stablecoin market cap ---")
@ -168,7 +201,6 @@ class MarketCapFetcher:
logging.info(f"Fetching historical data for stablecoin: {symbol}...") logging.info(f"Fetching historical data for stablecoin: {symbol}...")
df = self._fetch_historical_data(coin_id, days=365) df = self._fetch_historical_data(coin_id, days=365)
if not df.empty: if not df.empty:
df['coin'] = symbol
all_stablecoin_df = pd.concat([all_stablecoin_df, df]) all_stablecoin_df = pd.concat([all_stablecoin_df, df])
time.sleep(2) time.sleep(2)
@ -176,31 +208,30 @@ class MarketCapFetcher:
logging.warning("No data fetched for any stablecoins. Cannot create aggregate.") logging.warning("No data fetched for any stablecoins. Cannot create aggregate.")
return return
aggregated_df = all_stablecoin_df.groupby(all_stablecoin_df['datetime_utc'].dt.date)['market_cap'].sum().reset_index() aggregated_df = all_stablecoin_df.groupby('timestamp_ms').agg(
aggregated_df['datetime_utc'] = pd.to_datetime(aggregated_df['datetime_utc']) datetime_utc=('datetime_utc', 'first'),
market_cap=('market_cap', 'sum')
).reset_index()
table_name = "STABLECOINS_market_cap" table_name = "STABLECOINS_market_cap"
last_date_in_db = self._get_last_date_from_db(table_name, conn) last_date_in_db = self._get_last_date_from_db(table_name, conn, is_timestamp_ms=True)
if last_date_in_db: if last_date_in_db:
aggregated_df = aggregated_df[aggregated_df['datetime_utc'] > last_date_in_db] aggregated_df = aggregated_df[aggregated_df['timestamp_ms'] > last_date_in_db]
if not aggregated_df.empty: if not aggregated_df.empty:
aggregated_df.to_sql(table_name, conn, if_exists='append', index=False) self._upsert_market_cap_data(conn, table_name, aggregated_df)
logging.info(f"Successfully saved {len(aggregated_df)} daily records to '{table_name}'.")
else: else:
logging.info("Aggregated stablecoin data is already up-to-date.") logging.info("Aggregated stablecoin data is already up-to-date.")
def _update_market_cap_for_coin(self, coin_id: str, coin_symbol: str, conn):
"""Fetches and upserts new market cap data for a single coin."""
table_name = f"{coin_symbol}_market_cap"
last_date_in_db = self._get_last_date_from_db(table_name, conn, is_timestamp_ms=True)
days_to_fetch = 365
if last_date_in_db:
delta_days = (datetime.now(timezone.utc) - datetime.fromtimestamp(last_date_in_db/1000, tz=timezone.utc)).days
if delta_days <= 0:
logging.info(f"Market cap data for '{coin_symbol}' is already up-to-date.")
return
@ -215,24 +246,30 @@ class MarketCapFetcher:
return
if last_date_in_db:
df = df[df['timestamp_ms'] > last_date_in_db]
if not df.empty:
self._upsert_market_cap_data(conn, table_name, df)
else:
logging.info(f"Data was fetched, but no new records needed saving for '{coin_symbol}'.")
def _get_last_date_from_db(self, table_name: str, conn, is_timestamp_ms: bool = False):
"""Gets the most recent date or timestamp from a market cap table."""
try:
cursor = conn.cursor()
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';")
if not cursor.fetchone():
return None
col_to_query = "timestamp_ms" if is_timestamp_ms else "datetime_utc"
last_val = pd.read_sql(f'SELECT MAX({col_to_query}) FROM "{table_name}"', conn).iloc[0, 0]
if pd.isna(last_val):
return None
if is_timestamp_ms:
return int(last_val)
return pd.to_datetime(last_val)
except Exception as e:
logging.error(f"Could not read last date from table '{table_name}': {e}")
return None
@ -245,7 +282,7 @@ class MarketCapFetcher:
try:
logging.debug(f"Fetching last {days} days for {coin_id}...")
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
data = response.json()
@ -253,9 +290,16 @@ class MarketCapFetcher:
if not market_caps: return pd.DataFrame()
df = pd.DataFrame(market_caps, columns=['timestamp_ms', 'market_cap'])
# --- FIX: Normalize all timestamps to the start of the day (00:00:00 UTC) ---
# This prevents duplicate entries for the same day (e.g., a "live" candle vs. the daily one)
df['datetime_utc'] = pd.to_datetime(df['timestamp_ms'], unit='ms').dt.normalize()
# Recalculate the timestamp_ms to match the normalized 00:00:00 datetime
df['timestamp_ms'] = (df['datetime_utc'].astype('int64') // 10**6)
df.drop_duplicates(subset=['timestamp_ms'], keep='last', inplace=True)
return df[['datetime_utc', 'timestamp_ms', 'market_cap']]
except requests.exceptions.RequestException as e:
logging.error(f"API request failed for {coin_id}: {e}.")
@ -264,12 +308,6 @@ class MarketCapFetcher:
if __name__ == "__main__": if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Fetch historical market cap data from CoinGecko.") parser = argparse.ArgumentParser(description="Fetch historical market cap data from CoinGecko.")
parser.add_argument(
"--coins",
nargs='+',
default=["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"],
help="List of coin symbols to fetch (e.g., BTC ETH)."
)
parser.add_argument( parser.add_argument(
"--log-level", "--log-level",
default="normal", default="normal",
@ -278,6 +316,6 @@ if __name__ == "__main__":
)
args = parser.parse_args()
fetcher = MarketCapFetcher(log_level=args.log_level)
fetcher.run()

170
position_manager.py Normal file
View File

@ -0,0 +1,170 @@
import logging
import os
import sys
import json
import time
import multiprocessing
import numpy as np # Import numpy to handle np.float64
from logging_utils import setup_logging
from trade_log import log_trade
class PositionManager:
"""
(Stateless) Listens for EXPLICIT signals (e.g., "OPEN_LONG") from all
strategies and converts them into specific execution orders
(e.g., "market_open") for the TradeExecutor.
It holds NO position state.
"""
def __init__(self, log_level: str, trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
# Note: Logging is set up by the run_position_manager function
self.trade_signal_queue = trade_signal_queue
self.order_execution_queue = order_execution_queue
# --- REMOVED: All state management ---
logging.info("Position Manager (Stateless) started.")
# --- REMOVED: _load_managed_positions method ---
# --- REMOVED: _save_managed_positions method ---
# --- REMOVED: All tick/rounding/meta logic ---
def send_order(self, agent: str, action: str, coin: str, is_buy: bool, size: float, reduce_only: bool = False, limit_px=None, sl_px=None, tp_px=None):
"""Helper function to put a standardized order onto the execution queue."""
order_data = {
"agent": agent,
"action": action,
"coin": coin,
"is_buy": is_buy,
"size": size,
"reduce_only": reduce_only,
"limit_px": limit_px,
"sl_px": sl_px,
"tp_px": tp_px,
}
logging.info(f"Sending order to executor: {order_data}")
self.order_execution_queue.put(order_data)
def run(self):
"""
Main execution loop. Blocks and waits for a signal from the queue.
Converts explicit strategy signals into execution orders.
"""
logging.info("Position Manager started. Waiting for signals...")
while True:
try:
trade_signal = self.trade_signal_queue.get()
if not trade_signal:
continue
logging.info(f"Received signal: {trade_signal}")
name = trade_signal['strategy_name']
config = trade_signal['config']
params = config['parameters']
coin = trade_signal['coin'].upper()
# --- NEW: The signal is now the explicit action ---
desired_signal = trade_signal['signal']
status = trade_signal
signal_price = status.get('signal_price')
if isinstance(signal_price, np.float64):
signal_price = float(signal_price)
if not signal_price or signal_price <= 0:
logging.warning(f"[{name}] Signal received with invalid or missing price ({signal_price}). Skipping.")
continue
# --- This logic is still needed for copy_trader's nested config ---
# --- But ONLY for finding leverage, not size ---
if 'coins_to_copy' in params:
logging.info(f"[{name}] Detected 'coins_to_copy'. Entering copy_trader logic...")
matching_coin_key = None
for key in params['coins_to_copy'].keys():
if key.upper() == coin:
matching_coin_key = key
break
if matching_coin_key:
coin_specific_config = params['coins_to_copy'][matching_coin_key]
else:
coin_specific_config = {}
# --- REMOVED: size = coin_specific_config.get('size') ---
params['leverage_long'] = coin_specific_config.get('leverage_long', 2)
params['leverage_short'] = coin_specific_config.get('leverage_short', 2)
# --- FIX: Read the size from the ROOT of the trade signal ---
size = trade_signal.get('size')
if not size or size <= 0:
logging.error(f"[{name}] Signal received with no 'size' or invalid size ({size}). Skipping trade.")
continue
# --- END FIX ---
leverage_long = int(params.get('leverage_long', 2))
leverage_short = int(params.get('leverage_short', 2))
agent_name = (config.get("agent") or "default").lower()
logging.info(f"[{name}] Agent set to: {agent_name}")
# --- REMOVED: current_position check ---
# --- Use pure signal_price directly for the limit_px ---
limit_px = signal_price
logging.info(f"[{name}] Using pure signal price for limit_px: {limit_px}")
# --- NEW: Stateless Signal-to-Order Conversion ---
if desired_signal == "OPEN_LONG":
logging.warning(f"[{name}] ACTION: Opening LONG for {coin}.")
# --- REMOVED: Leverage update signal ---
self.send_order(agent_name, "market_open", coin, True, size, limit_px=limit_px)
log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "OPEN_SHORT":
logging.warning(f"[{name}] ACTION: Opening SHORT for {coin}.")
# --- REMOVED: Leverage update signal ---
self.send_order(agent_name, "market_open", coin, False, size, limit_px=limit_px)
log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "CLOSE_LONG":
logging.warning(f"[{name}] ACTION: Closing LONG position for {coin}.")
# A "market_close" for a LONG is a SELL order
self.send_order(agent_name, "market_close", coin, False, size, limit_px=limit_px)
log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=signal_price, size=size, signal=desired_signal)
elif desired_signal == "CLOSE_SHORT":
logging.warning(f"[{name}] ACTION: Closing SHORT position for {coin}.")
# A "market_close" for a SHORT is a BUY order
self.send_order(agent_name, "market_close", coin, True, size, limit_px=limit_px)
log_trade(strategy=name, coin=coin, action="CLOSE_SHORT", price=signal_price, size=size, signal=desired_signal)
# --- NEW: Handle leverage update signals ---
elif desired_signal == "UPDATE_LEVERAGE_LONG":
logging.warning(f"[{name}] ACTION: Updating LONG leverage for {coin} to {size}x")
# 'size' field holds the leverage value for this signal
self.send_order(agent_name, "update_leverage", coin, True, size)
elif desired_signal == "UPDATE_LEVERAGE_SHORT":
logging.warning(f"[{name}] ACTION: Updating SHORT leverage for {coin} to {size}x")
# 'size' field holds the leverage value for this signal
self.send_order(agent_name, "update_leverage", coin, False, size)
else:
logging.warning(f"[{name}] Received unknown signal '{desired_signal}'. No action taken.")
# --- REMOVED: _save_managed_positions() ---
except Exception as e:
logging.error(f"An error occurred in the position manager loop: {e}", exc_info=True)
time.sleep(1)
# This script is no longer run directly, but is called by main_app.py
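For orientation, the queue messages this manager consumes look roughly like the dict below (all values illustrative); note that `size` carries the leverage value when the signal is one of the UPDATE_LEVERAGE_* variants:

example_signal = {
    "strategy_name": "copy_trader_1",
    "signal": "OPEN_LONG",  # or CLOSE_LONG / OPEN_SHORT / CLOSE_SHORT / UPDATE_LEVERAGE_LONG / UPDATE_LEVERAGE_SHORT
    "coin": "BTC",
    "signal_price": 101250.5,
    "size": 0.02,
    "config": {"agent": "default", "parameters": {"leverage_long": 3, "leverage_short": 2}},
}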

159
position_monitor.py Normal file
View File

@ -0,0 +1,159 @@
import os
import sys
import time
import json
import argparse
from datetime import datetime, timezone
from hyperliquid.info import Info
from hyperliquid.utils import constants
from dotenv import load_dotenv
import logging
from logging_utils import setup_logging
# Load .env file
load_dotenv()
class PositionMonitor:
"""
A standalone, read-only dashboard for monitoring all open perpetuals
positions, spot balances, and their associated strategies.
"""
def __init__(self, log_level: str):
setup_logging(log_level, 'PositionMonitor')
self.wallet_address = os.environ.get("MAIN_WALLET_ADDRESS")
if not self.wallet_address:
logging.error("MAIN_WALLET_ADDRESS not set in .env file. Cannot proceed.")
sys.exit(1)
self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
self._lines_printed = 0
logging.info(f"Monitoring vault address: {self.wallet_address}")
def load_managed_positions(self) -> dict:
"""Loads the state of which strategy manages which position."""
if os.path.exists(self.managed_positions_path):
try:
with open(self.managed_positions_path, 'r') as f:
# Create a reverse map: {coin: strategy_name}
data = json.load(f)
return {v['coin']: k for k, v in data.items()}
except (IOError, json.JSONDecodeError):
logging.warning("Could not read managed positions file.")
return {}
def run(self):
"""Main loop to continuously refresh the dashboard."""
try:
while True:
self.display_dashboard()
time.sleep(5) # Refresh every 5 seconds
except KeyboardInterrupt:
logging.info("Position monitor stopped.")
def display_dashboard(self):
"""Fetches all data and draws the dashboard without blinking."""
if self._lines_printed > 0:
print(f"\x1b[{self._lines_printed}A", end="")
output_lines = []
try:
perp_state = self.info.user_state(self.wallet_address)
spot_state = self.info.spot_user_state(self.wallet_address)
coin_to_strategy_map = self.load_managed_positions()
output_lines.append(f"--- Live Position Monitor for {self.wallet_address[:6]}...{self.wallet_address[-4:]} ---")
# --- 1. Perpetuals Account Summary ---
margin_summary = perp_state.get('marginSummary', {})
account_value = float(margin_summary.get('accountValue', 0))
margin_used = float(margin_summary.get('totalMarginUsed', 0))
utilization = (margin_used / account_value) * 100 if account_value > 0 else 0
output_lines.append("\n--- Perpetuals Account Summary ---")
output_lines.append(f" Account Value: ${account_value:,.2f} | Margin Used: ${margin_used:,.2f} | Utilization: {utilization:.2f}%")
# --- 2. Spot Balances Summary ---
output_lines.append("\n--- Spot Balances ---")
spot_balances = spot_state.get('balances', [])
if not spot_balances:
output_lines.append(" No spot balances found.")
else:
balances_str = ", ".join([f"{b.get('coin')}: {float(b.get('total', 0)):,.4f}" for b in spot_balances if float(b.get('total', 0)) > 0])
output_lines.append(f" {balances_str}")
# --- 3. Open Positions Table ---
output_lines.append("\n--- Open Perpetual Positions ---")
positions = perp_state.get('assetPositions', [])
open_positions = [p for p in positions if p.get('position') and float(p['position'].get('szi', 0)) != 0]
if not open_positions:
output_lines.append(" No open perpetual positions found.")
output_lines.append("") # Add a line for stable refresh
else:
self.build_positions_table(open_positions, coin_to_strategy_map, output_lines)
except Exception as e:
output_lines = [f"An error occurred: {e}"]
final_output = "\n".join(output_lines) + "\n\x1b[J" # \x1b[J clears to end of screen
print(final_output, end="")
self._lines_printed = len(output_lines)
sys.stdout.flush()
def build_positions_table(self, positions: list, coin_to_strategy_map: dict, output_lines: list):
"""Builds the text for the positions summary table."""
header = f"| {'Strategy':<25} | {'Coin':<6} | {'Side':<5} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |"
output_lines.append(header)
output_lines.append("-" * len(header))
for position in positions:
pos = position.get('position', {})
coin = pos.get('coin', 'Unknown')
size = float(pos.get('szi', 0))
entry_px = float(pos.get('entryPx', 0))
mark_px = float(pos.get('markPx', 0))
unrealized_pnl = float(pos.get('unrealizedPnl', 0))
# Get leverage
position_value = float(pos.get('positionValue', 0))
margin_used = float(pos.get('marginUsed', 0))
leverage = (position_value / margin_used) if margin_used > 0 else 0
side_text = "LONG" if size > 0 else "SHORT"
pnl_sign = "+" if unrealized_pnl >= 0 else ""
# Find the strategy that owns this coin
strategy_name = coin_to_strategy_map.get(coin, "Unmanaged")
# Format all values as strings
strategy_str = f"{strategy_name:<25}"
coin_str = f"{coin:<6}"
side_str = f"{side_text:<5}"
size_str = f"{size:>15.4f}"
entry_str = f"${entry_px:>11,.2f}"
mark_str = f"${mark_px:>11,.2f}"
pnl_str = f"{pnl_sign}${unrealized_pnl:>14,.2f}"
lev_str = f"{leverage:>9.1f}x"
output_lines.append(f"| {strategy_str} | {coin_str} | {side_str} | {size_str} | {entry_str} | {mark_str} | {pnl_str} | {lev_str} |")
output_lines.append("-" * len(header))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Monitor a Hyperliquid wallet's positions in real-time.")
parser.add_argument(
"--log-level",
default="normal",
choices=['off', 'normal', 'debug'],
help="Set the logging level for the script."
)
args = parser.parse_args()
monitor = PositionMonitor(log_level=args.log_level)
monitor.run()
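The flicker-free refresh in display_dashboard rests on two ANSI escape codes: \x1b[{n}A moves the cursor up n lines and \x1b[J clears from the cursor to the end of the screen, so each cycle overwrites the previous frame in place. A standalone sketch of the technique:

import time

lines_printed = 0
for i in range(3):
    if lines_printed > 0:
        print(f"\x1b[{lines_printed}A", end="")  # move cursor up over the old frame
    frame = [f"tick {i}", "status: OK"]
    print("\n".join(frame) + "\n\x1b[J", end="")  # redraw, then clear any leftovers below
    lines_printed = len(frame)
    time.sleep(1)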

View File

@ -37,7 +37,7 @@ class Resampler:
def _ensure_tables_exist(self):
"""
Ensures all resampled tables exist with a PRIMARY KEY on timestamp_ms.
Attempts to migrate existing tables if the schema is incorrect.
"""
with sqlite3.connect(self.db_path) as conn:
@ -48,13 +48,22 @@ class Resampler:
cursor.execute(f"PRAGMA table_info('{table_name}')") cursor.execute(f"PRAGMA table_info('{table_name}')")
columns = cursor.fetchall() columns = cursor.fetchall()
if columns: if columns:
pk_found = any(col[1] == 'datetime_utc' and col[5] == 1 for col in columns) # --- FIX: Check for the correct PRIMARY KEY on timestamp_ms ---
pk_found = any(col[1] == 'timestamp_ms' and col[5] == 1 for col in columns)
if not pk_found: if not pk_found:
logging.warning(f"Schema migration needed for table '{table_name}'.") logging.warning(f"Schema migration needed for table '{table_name}'.")
try: try:
conn.execute(f'ALTER TABLE "{table_name}" RENAME TO "{table_name}_old"') conn.execute(f'ALTER TABLE "{table_name}" RENAME TO "{table_name}_old"')
self._create_resampled_table(conn, table_name) self._create_resampled_table(conn, table_name)
conn.execute(f'INSERT OR IGNORE INTO "{table_name}" SELECT * FROM "{table_name}_old"') # Copy data, ensuring to create the timestamp_ms
logging.info(f" -> Migrating data for '{table_name}'...")
old_df = pd.read_sql(f'SELECT * FROM "{table_name}_old"', conn, parse_dates=['datetime_utc'])
if not old_df.empty:
old_df['timestamp_ms'] = (old_df['datetime_utc'].astype('int64') // 10**6)
# Keep only unique timestamps, preserving the last entry
old_df.drop_duplicates(subset=['timestamp_ms'], keep='last', inplace=True)
old_df.to_sql(table_name, conn, if_exists='append', index=False)
logging.info(f" -> Data migration complete.")
conn.execute(f'DROP TABLE "{table_name}_old"')
conn.commit()
logging.info(f"Successfully migrated schema for '{table_name}'.")
@ -67,9 +76,11 @@ class Resampler:
def _create_resampled_table(self, conn, table_name):
"""Creates a new resampled table with the correct schema."""
# --- FIX: Set PRIMARY KEY on timestamp_ms for performance and uniqueness ---
conn.execute(f'''
CREATE TABLE "{table_name}" (
datetime_utc TEXT,
timestamp_ms INTEGER PRIMARY KEY,
open REAL,
high REAL,
low REAL,
@ -123,22 +134,21 @@ class Resampler:
source_table_name = f"{coin}_1m" source_table_name = f"{coin}_1m"
logging.debug(f" Updating {tf_name} table...") logging.debug(f" Updating {tf_name} table...")
last_timestamp = self._get_last_timestamp(conn, target_table_name) last_timestamp_ms = self._get_last_timestamp(conn, target_table_name)
query = f'SELECT * FROM "{source_table_name}"' query = f'SELECT * FROM "{source_table_name}"'
params = () params = ()
if last_timestamp: if last_timestamp_ms:
query += ' WHERE datetime_utc >= ?' query += ' WHERE timestamp_ms >= ?'
# Go back one interval to rebuild the last (potentially partial) candle
try: try:
# --- FIX: Try the fast method first --- interval_delta_ms = pd.to_timedelta(tf_code).total_seconds() * 1000
interval_delta = pd.to_timedelta(tf_code)
query_start_date = last_timestamp - interval_delta
except ValueError: except ValueError:
# --- FIX: Fall back to the safe method for special timeframes --- # Fall back to a safe 32-day lookback for special timeframes
logging.debug(f"Cannot create timedelta for '{tf_code}'. Using safe 32-day lookback.") interval_delta_ms = timedelta(days=32).total_seconds() * 1000
query_start_date = last_timestamp - timedelta(days=32)
params = (query_start_date.strftime('%Y-%m-%d %H:%M:%S'),) query_start_ms = last_timestamp_ms - interval_delta_ms
params = (query_start_ms,)
df_1m = pd.read_sql(query, conn, params=params, parse_dates=['datetime_utc']) df_1m = pd.read_sql(query, conn, params=params, parse_dates=['datetime_utc'])
@ -155,14 +165,15 @@ class Resampler:
for index, row in resampled_df.iterrows():
records_to_upsert.append((
index.strftime('%Y-%m-%d %H:%M:%S'),
int(index.timestamp() * 1000), # Generate timestamp_ms
row['open'], row['high'], row['low'], row['close'],
row['volume'], row['number_of_trades']
))
cursor = conn.cursor()
cursor.executemany(f'''
INSERT OR REPLACE INTO "{target_table_name}" (datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
''', records_to_upsert)
conn.commit()
@ -203,10 +214,11 @@ class Resampler:
logging.info(f" - {tf_name:<10}: {total:,} candles") logging.info(f" - {tf_name:<10}: {total:,} candles")
def _get_last_timestamp(self, conn, table_name): def _get_last_timestamp(self, conn, table_name):
"""Gets the timestamp of the last entry in a table as a pandas Timestamp.""" """Gets the millisecond timestamp of the last entry in a table."""
try: try:
timestamp_str = pd.read_sql(f'SELECT MAX(datetime_utc) FROM "{table_name}"', conn).iloc[0, 0] # --- FIX: Query for the integer timestamp_ms, not the text datetime_utc ---
return pd.to_datetime(timestamp_str) if timestamp_str else None timestamp_ms = pd.read_sql(f'SELECT MAX(timestamp_ms) FROM "{table_name}"', conn).iloc[0, 0]
return int(timestamp_ms) if pd.notna(timestamp_ms) else None
except (pd.io.sql.DatabaseError, IndexError): except (pd.io.sql.DatabaseError, IndexError):
return None return None
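For context, the resampled_df consumed above is built from the 1-minute candles with pandas' resample; a minimal sketch of the assumed aggregation (function name hypothetical):

import pandas as pd

def resample_1m(df_1m: pd.DataFrame, tf_code: str) -> pd.DataFrame:
    # df_1m is indexed by datetime_utc and holds 1-minute OHLCV rows
    return df_1m.resample(tf_code).agg({
        'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last',
        'volume': 'sum', 'number_of_trades': 'sum',
    }).dropna(subset=['open'])  # drop empty buckets with no trades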

View File

@ -5,36 +5,42 @@ import os
import logging
from datetime import datetime, timezone
import sqlite3
import multiprocessing
import time
from logging_utils import setup_logging
from hyperliquid.info import Info
from hyperliquid.utils import constants
class BaseStrategy(ABC):
"""
An abstract base class that defines the blueprint for all trading strategies.
It provides common functionality like loading data, saving status, and state management.
"""
def __init__(self, strategy_name: str, params: dict, trade_signal_queue: multiprocessing.Queue = None, shared_status: dict = None):
self.strategy_name = strategy_name
self.params = params
self.trade_signal_queue = trade_signal_queue
# Optional multiprocessing.Manager().dict() to hold live status (avoids file IO)
self.shared_status = shared_status
self.coin = params.get("coin", "N/A") self.coin = params.get("coin", "N/A")
self.timeframe = params.get("timeframe", "N/A") self.timeframe = params.get("timeframe", "N/A")
self.db_path = os.path.join("_data", "market_data.db") self.db_path = os.path.join("_data", "market_data.db")
self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json") self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
# --- ADDED: State variables required for status reporting ---
self.current_signal = "INIT" self.current_signal = "INIT"
self.last_signal_change_utc = None self.last_signal_change_utc = None
self.signal_price = None self.signal_price = None
# This will be set up by the child class after it's initialized # Note: Logging is set up by the run_strategy function
# setup_logging(log_level, f"Strategy-{self.strategy_name}")
# logging.info(f"Initializing with parameters: {self.params}")
def load_data(self) -> pd.DataFrame:
"""Loads historical data for the configured coin and timeframe."""
table_name = f"{self.coin}_{self.timeframe}"
periods = [v for k, v in self.params.items() if 'period' in k or '_ma' in k or 'slow' in k or 'fast' in k]
limit = max(periods) + 50 if periods else 500
try:
@ -51,11 +57,45 @@ class BaseStrategy(ABC):
@abstractmethod
def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
"""The core logic of the strategy. Must be implemented by child classes."""
pass
def calculate_signals_and_state(self, df: pd.DataFrame) -> bool:
"""
A wrapper that calls the strategy's signal calculation, determines
the last signal change, and returns True if the signal has changed.
"""
df_with_signals = self.calculate_signals(df)
df_with_signals.dropna(inplace=True)
if df_with_signals.empty:
return False
df_with_signals['position_change'] = df_with_signals['signal'].diff()
last_signal_int = df_with_signals['signal'].iloc[-1]
new_signal_str = "HOLD"
if last_signal_int == 1: new_signal_str = "BUY"
elif last_signal_int == -1: new_signal_str = "SELL"
signal_changed = False
if self.current_signal == "INIT":
if new_signal_str == "BUY": self.current_signal = "INIT_BUY"
elif new_signal_str == "SELL": self.current_signal = "INIT_SELL"
else: self.current_signal = "HOLD"
signal_changed = True
elif new_signal_str != self.current_signal:
self.current_signal = new_signal_str
signal_changed = True
if signal_changed:
last_change_series = df_with_signals[df_with_signals['position_change'] != 0]
if not last_change_series.empty:
last_change_row = last_change_series.iloc[-1]
self.last_signal_change_utc = last_change_row.name.tz_localize('UTC').isoformat()
self.signal_price = last_change_row['close']
return signal_changed
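The diff() on the signal column is what pins down the flip: position_change is non-zero at each crossover, so the last such row yields the time and price of the most recent change. A tiny worked example:

import pandas as pd

s = pd.Series([0, 1, 1, -1, -1], name='signal')
print(s.diff().tolist())  # [nan, 1.0, 0.0, -2.0, 0.0] -> flips at index 1 and index 3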
def _save_status(self):
"""Saves the current strategy state to its JSON file."""
status = {
@ -65,9 +105,62 @@ class BaseStrategy(ABC):
"signal_price": self.signal_price, "signal_price": self.signal_price,
"last_checked_utc": datetime.now(timezone.utc).isoformat() "last_checked_utc": datetime.now(timezone.utc).isoformat()
} }
# If a shared status dict is provided (Manager.dict()), update it instead of writing files
try:
if self.shared_status is not None:
try:
# store the status under the strategy name for easy lookup
self.shared_status[self.strategy_name] = status
except Exception:
# Manager proxies may not accept nested mutable objects consistently; assign a copy
self.shared_status[self.strategy_name] = dict(status)
else:
with open(self.status_file_path, 'w', encoding='utf-8') as f:
json.dump(status, f, indent=4)
except IOError as e:
logging.error(f"Failed to write status file for {self.strategy_name}: {e}")
def run_polling_loop(self):
"""
The default execution loop for polling-based strategies (e.g., SMAs).
"""
while True:
df = self.load_data()
if df.empty:
logging.warning("No data loaded. Waiting 1 minute...")
time.sleep(60)
continue
signal_changed = self.calculate_signals_and_state(df.copy())
self._save_status()
if signal_changed or self.current_signal == "INIT_BUY" or self.current_signal == "INIT_SELL":
logging.warning(f"New signal detected: {self.current_signal}")
self.trade_signal_queue.put({
"strategy_name": self.strategy_name,
"signal": self.current_signal,
"coin": self.coin,
"signal_price": self.signal_price,
"config": {"agent": self.params.get("agent"), "parameters": self.params}
})
if self.current_signal == "INIT_BUY": self.current_signal = "BUY"
if self.current_signal == "INIT_SELL": self.current_signal = "SELL"
logging.info(f"Current Signal: {self.current_signal}")
time.sleep(60)
def run_event_loop(self):
"""
A placeholder for event-driven (WebSocket) strategies.
Child classes must override this.
"""
logging.error("run_event_loop() is not implemented for this strategy.")
time.sleep(3600) # Sleep for an hour to prevent rapid error loops
def on_fill_message(self, message):
"""
Placeholder for the WebSocket callback.
Child classes must override this.
"""
pass
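A polling strategy therefore only needs to implement calculate_signals; data loading, state tracking, and the loop itself are inherited. A minimal hypothetical subclass:

class AlwaysFlat(BaseStrategy):
    """Hypothetical example: stays flat forever."""
    def calculate_signals(self, df):
        df['signal'] = 0  # read by calculate_signals_and_state in run_polling_loop
        return df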

View File

@ -0,0 +1,353 @@
import logging
import time
import json
import os
from datetime import datetime, timezone
from hyperliquid.info import Info
from hyperliquid.utils import constants
from strategies.base_strategy import BaseStrategy
class CopyTraderStrategy(BaseStrategy):
"""
An event-driven strategy that monitors a target wallet address and
copies its trades for a specific set of allowed coins.
This strategy is STATELESS. It translates a target's fill direction
(e.g., "Open Long") directly into an explicit signal
(e.g., "OPEN_LONG") for the PositionManager.
"""
def __init__(self, strategy_name: str, params: dict, trade_signal_queue, shared_status: dict = None):
# --- MODIFIED: Pass the correct queue to the parent ---
# The event-driven copy trader should send orders to the order_execution_queue
# We will assume the queue passed in is the correct one (as setup in main_app.py)
super().__init__(strategy_name, params, trade_signal_queue, shared_status)
self.target_address = self.params.get("target_address", "").lower()
self.coins_to_copy = self.params.get("coins_to_copy", {})
# Convert all coin keys to uppercase for consistency
self.coins_to_copy = {k.upper(): v for k, v in self.coins_to_copy.items()}
self.allowed_coins = list(self.coins_to_copy.keys())
if not self.target_address:
logging.error("No 'target_address' specified in parameters for copy trader.")
raise ValueError("target_address is required")
if not self.allowed_coins:
logging.warning("No 'coins_to_copy' configured. This strategy will not copy any trades.")
self.info = None # Will be initialized in the run loop
# --- REMOVED: All local state management ---
# self.position_state_file = ...
# self.current_positions = ...
# --- MODIFIED: Check if shared_status is None before using it ---
if self.shared_status is None:
logging.warning("No shared_status dictionary provided. Initializing a new one.")
self.shared_status = {}
self.current_signal = self.shared_status.get("current_signal", "WAIT")
self.signal_price = self.shared_status.get("signal_price")
self.last_signal_change_utc = self.shared_status.get("last_signal_change_utc")
self.start_time_utc = datetime.now(timezone.utc)
logging.info(f"Strategy initialized. Ignoring all trades before {self.start_time_utc.isoformat()}")
# --- REMOVED: _load_position_state ---
# --- REMOVED: _save_position_state ---
def calculate_signals(self, df):
# This strategy is event-driven, so it does not use polling-based signal calculation.
pass
def send_explicit_signal(self, signal: str, coin: str, price: float, trade_params: dict, size: float):
"""Helper to send a formatted signal to the PositionManager."""
config = {
# --- MODIFIED: Ensure agent is read from params ---
"agent": self.params.get("agent"),
"parameters": trade_params
}
# --- MODIFIED: Use self.trade_signal_queue (which is the queue passed in) ---
self.trade_signal_queue.put({
"strategy_name": self.strategy_name,
"signal": signal, # e.g., "OPEN_LONG", "CLOSE_SHORT"
"coin": coin,
"signal_price": price,
"config": config,
"size": size # Explicitly pass size (or leverage for leverage updates)
})
logging.info(f"Explicit signal SENT: {signal} {coin} @ {price}, Size: {size}")
def on_fill_message(self, message):
"""
This is the callback function that gets triggered by the WebSocket
every time the monitored address has an event.
"""
try:
# --- NEW: Add logging to see ALL messages ---
logging.debug(f"Received WebSocket message: {message}")
channel = message.get("channel")
if channel not in ("user", "userFills", "userEvents"):
# --- NEW: Added debug logging ---
logging.debug(f"Ignoring message from unhandled channel: {channel}")
return
data = message.get("data")
if not data:
# --- NEW: Added debug logging ---
logging.debug("Message received with no 'data' field. Ignoring.")
return
# --- NEW: Check for user address FIRST ---
user_address = data.get("user", "").lower()
if not user_address:
logging.debug("Received message with 'data' but no 'user'. Ignoring.")
return
# --- MODIFIED: Check for 'fills' vs. other event types ---
# This check is still valid for userFills
if "fills" not in data or not data.get("fills"):
# This is a userEvent, but not a fill (e.g., order placement, cancel, withdrawal)
event_type = data.get("type") # e.g., 'order', 'cancel', 'withdrawal'
if event_type:
logging.debug(f"Received non-fill user event: '{event_type}'. Ignoring.")
else:
logging.debug(f"Received 'data' message with no 'fills'. Ignoring.")
return
# --- This line is now safe to run ---
if user_address != self.target_address:
# This shouldn't happen if the subscription is correct, but good to check
logging.warning(f"Received fill for wrong user: {user_address}")
return
fills = data.get("fills")
logging.debug(f"Received {len(fills)} fill(s) for user {user_address}")
for fill in fills:
# Check if the trade is new or historical
trade_time = datetime.fromtimestamp(fill['time'] / 1000, tz=timezone.utc)
if trade_time < self.start_time_utc:
logging.info(f"Ignoring stale/historical trade from {trade_time.isoformat()}")
continue
coin = fill.get('coin').upper()
if coin in self.allowed_coins:
price = float(fill.get('px'))
# --- MODIFIED: Use the target's fill size ---
fill_size = float(fill.get('sz')) # Target's size
if fill_size == 0:
logging.warning(f"Ignoring fill with size 0.")
continue
# --- NEW: Get the fill direction ---
# "dir": "Open Long", "Close Long", "Open Short", "Close Short"
fill_direction = fill.get("dir")
# --- NEW: Get startPosition to calculate flip sizes ---
start_pos_size = float(fill.get('startPosition', 0.0))
if not fill_direction:
logging.warning(f"Fill message missing 'dir'. Ignoring fill: {fill}")
continue
# Get our strategy's configured leverage for this coin
coin_config = self.coins_to_copy.get(coin)
# --- REMOVED: Check for coin_config.get("size") ---
# --- REMOVED: strategy_trade_size = coin_config.get("size") ---
# Prepare config for the signal
trade_params = self.params.copy()
if coin_config:
trade_params.update(coin_config)
# --- REMOVED: All stateful logic (current_local_pos, etc.) ---
# --- MODIFIED: Expanded logic to handle flip directions ---
signal_sent = False
dashboard_signal = ""
if fill_direction == "Open Long":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: OPEN_LONG")
self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, fill_size)
signal_sent = True
dashboard_signal = "OPEN_LONG"
elif fill_direction == "Close Long":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: CLOSE_LONG")
self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, fill_size)
signal_sent = True
dashboard_signal = "CLOSE_LONG"
elif fill_direction == "Open Short":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: OPEN_SHORT")
self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, fill_size)
signal_sent = True
dashboard_signal = "OPEN_SHORT"
elif fill_direction == "Close Short":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: CLOSE_SHORT")
self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, fill_size)
signal_sent = True
dashboard_signal = "CLOSE_SHORT"
elif fill_direction == "Short > Long":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending CLOSE_SHORT then OPEN_LONG.")
close_size = abs(start_pos_size)
open_size = fill_size - close_size
if close_size > 0:
self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, close_size)
if open_size > 0:
self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, open_size)
signal_sent = True
dashboard_signal = "FLIP_TO_LONG"
elif fill_direction == "Long > Short":
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending CLOSE_LONG then OPEN_SHORT.")
close_size = abs(start_pos_size)
open_size = fill_size - close_size
if close_size > 0:
self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, close_size)
if open_size > 0:
self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, open_size)
signal_sent = True
dashboard_signal = "FLIP_TO_SHORT"
if signal_sent:
# Update dashboard status
self.current_signal = dashboard_signal # Show the action
self.signal_price = price
self.last_signal_change_utc = trade_time.isoformat()
self.coin = coin # Update coin for dashboard
self.size = fill_size # Update size for dashboard
self._save_status() # For dashboard
logging.info(f"Source trade logged: {json.dumps(fill)}")
else:
logging.info(f"[{coin}] Ignoring unhandled fill direction: {fill_direction}")
else:
logging.info(f"Ignoring fill for unmonitored coin: {coin}")
except Exception as e:
logging.error(f"Error in on_fill_message: {e}", exc_info=True)
def _connect_and_subscribe(self):
"""
Establishes a new WebSocket connection and subscribes to the userFills channel.
"""
try:
logging.info("Connecting to Hyperliquid WebSocket...")
self.info = Info(constants.MAINNET_API_URL, skip_ws=False)
# --- MODIFIED: Reverted to 'userFills' as requested ---
subscription = {"type": "userFills", "user": self.target_address}
self.info.subscribe(subscription, self.on_fill_message)
logging.info(f"Subscribed to 'userFills' for target address: {self.target_address}")
return True
except Exception as e:
logging.error(f"Failed to connect or subscribe: {e}")
self.info = None
return False
def run_event_loop(self):
"""
This method overrides the default polling loop. It establishes a
persistent WebSocket connection and runs a watchdog to ensure
it stays connected.
"""
try:
if not self._connect_and_subscribe():
# If connection fails on start, wait 60s before letting the process restart
time.sleep(60)
return
# --- MODIFIED: Add a small delay to ensure Info object is ready for REST calls ---
logging.info("Connection established. Waiting 2 seconds for Info client to be ready...")
time.sleep(2)
# --- END MODIFICATION ---
# --- NEW: Set initial leverage for all monitored coins ---
logging.info("Setting initial leverage for all monitored coins...")
try:
all_mids = self.info.all_mids()
for coin_key, coin_config in self.coins_to_copy.items():
coin = coin_key.upper()
# Use a failsafe price of 1.0 if coin not in mids (e.g., new listing)
current_price = float(all_mids.get(coin, 1.0))
leverage_long = coin_config.get('leverage_long', 2)
leverage_short = coin_config.get('leverage_short', 2)
# Prepare config for the signal
trade_params = self.params.copy()
trade_params.update(coin_config)
# Send LONG leverage update
# The 'size' param is used to pass the leverage value for this signal type
self.send_explicit_signal("UPDATE_LEVERAGE_LONG", coin, current_price, trade_params, leverage_long)
# Send SHORT leverage update
self.send_explicit_signal("UPDATE_LEVERAGE_SHORT", coin, current_price, trade_params, leverage_short)
logging.info(f"Sent initial leverage signals for {coin} (Long: {leverage_long}x, Short: {leverage_short}x)")
except Exception as e:
logging.error(f"Failed to set initial leverage: {e}", exc_info=True)
# --- END NEW LEVERAGE LOGIC ---
# Save the initial "WAIT" status
self._save_status()
while True:
try:
time.sleep(15) # Check the connection every 15 seconds
if self.info is None or not self.info.ws_manager.is_alive():
logging.error(f"WebSocket connection lost. Attempting to reconnect...")
if self.info and self.info.ws_manager:
try:
self.info.ws_manager.stop()
except Exception as e:
logging.error(f"Error stopping old ws_manager: {e}")
if not self._connect_and_subscribe():
logging.error("Reconnect failed, will retry in 15s.")
else:
logging.info("Successfully reconnected to WebSocket.")
self._save_status()
else:
logging.debug("Watchdog check: WebSocket connection is active.")
except Exception as e:
logging.error(f"An error occurred in the watchdog loop: {e}", exc_info=True)
except KeyboardInterrupt:
# --- MODIFIED: No positions to close, just exit ---
logging.warning(f"Shutdown signal received. Exiting strategy '{self.strategy_name}'.")
except Exception as e:
logging.error(f"An unhandled error occurred in run_event_loop: {e}", exc_info=True)
finally:
if self.info and self.info.ws_manager and self.info.ws_manager.is_alive():
try:
self.info.ws_manager.stop()
logging.info("WebSocket connection stopped.")
except Exception as e:
logging.error(f"Error stopping ws_manager on exit: {e}")

View File

@ -7,27 +7,22 @@ class MaCrossStrategy(BaseStrategy):
A strategy based on a fast Simple Moving Average (SMA) crossing
a slow SMA.
"""
# --- FIX: Changed 3rd argument from log_level to trade_signal_queue ---
def __init__(self, strategy_name: str, params: dict, trade_signal_queue):
# --- FIX: Passed trade_signal_queue to the parent class ---
super().__init__(strategy_name, params, trade_signal_queue)
self.fast_ma_period = self.params.get('short_ma') or self.params.get('fast') or 0
self.slow_ma_period = self.params.get('long_ma') or self.params.get('slow') or 0
def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
if not self.fast_ma_period or not self.slow_ma_period or len(df) < self.slow_ma_period:
logging.warning(f"Not enough data for MA periods.")
df['signal'] = 0
return df
df['fast_sma'] = df['close'].rolling(window=self.fast_ma_period).mean()
df['slow_sma'] = df['close'].rolling(window=self.slow_ma_period).mean()
df['signal'] = 0
df.loc[df['fast_sma'] > df['slow_sma'], 'signal'] = 1
df.loc[df['fast_sma'] < df['slow_sma'], 'signal'] = -1
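Concretely, with a 2-bar fast and 3-bar slow SMA, the signal flips to 1 on the first bar where the fast mean overtakes the slow mean; a quick check:

import pandas as pd

df = pd.DataFrame({'close': [10, 9, 8, 9, 11, 14]})
df['fast_sma'] = df['close'].rolling(2).mean()
df['slow_sma'] = df['close'].rolling(3).mean()
print((df['fast_sma'] > df['slow_sma']).tolist())
# [False, False, False, False, True, True] -> golden cross at index 4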

View File

@ -6,17 +6,20 @@ class SingleSmaStrategy(BaseStrategy):
     """
     A strategy based on the price crossing a single Simple Moving Average (SMA).
     """
+    # --- FIX: Added trade_signal_queue to the constructor ---
+    def __init__(self, strategy_name: str, params: dict, trade_signal_queue):
+        # --- FIX: Passed trade_signal_queue to the parent class ---
+        super().__init__(strategy_name, params, trade_signal_queue)
+        self.sma_period = self.params.get('sma_period', 0)
+
     def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
-        sma_period = self.params.get('sma_period', 0)
-
-        if not sma_period or len(df) < sma_period:
-            logging.warning(f"Not enough data for SMA period {sma_period}. Need {sma_period}, have {len(df)}.")
+        if not self.sma_period or len(df) < self.sma_period:
+            logging.warning(f"Not enough data for SMA period {self.sma_period}.")
             df['signal'] = 0
             return df
-        df['sma'] = df['close'].rolling(window=sma_period).mean()
-        # Signal is 1 when price is above SMA, -1 when below
+        df['sma'] = df['close'].rolling(window=self.sma_period).mean()
         df['signal'] = 0
         df.loc[df['close'] > df['sma'], 'signal'] = 1
         df.loc[df['close'] < df['sma'], 'signal'] = -1

View File

@ -4,7 +4,9 @@ import os
 import sys
 import json
 import time
+# --- REVERTED: Removed math import ---
 from datetime import datetime
+import multiprocessing

 from eth_account import Account
 from hyperliquid.exchange import Exchange
@ -13,20 +15,20 @@ from hyperliquid.utils import constants
 from dotenv import load_dotenv
 from logging_utils import setup_logging
-from trade_log import log_trade

-# Load environment variables from a .env file
 load_dotenv()

 class TradeExecutor:
     """
-    Monitors strategy signals and executes trades using a multi-agent,
-    multi-strategy position management system. Each strategy's position is
-    tracked independently.
+    Executes orders from a queue and, upon API success,
+    updates the shared 'opened_positions.json' state file.
+    It is the single source of truth for position state.
     """
-    def __init__(self, log_level: str):
-        setup_logging(log_level, 'TradeExecutor')
+    def __init__(self, log_level: str, order_execution_queue: multiprocessing.Queue):
+        # Note: Logging is set up by the run_trade_executor function
+        self.order_execution_queue = order_execution_queue

         self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
         if not self.vault_address:
@ -39,21 +41,18 @@ class TradeExecutor:
             logging.error("No trading agents found in .env file.")
             sys.exit(1)

-        strategy_config_path = os.path.join("_data", "strategies.json")
-        try:
-            with open(strategy_config_path, 'r') as f:
-                self.strategy_configs = {name: config for name, config in json.load(f).items() if config.get("enabled")}
-            logging.info(f"Loaded {len(self.strategy_configs)} enabled strategies.")
-        except (FileNotFoundError, json.JSONDecodeError) as e:
-            logging.error(f"Could not load strategies from '{strategy_config_path}': {e}")
-            sys.exit(1)
-
-        self.status_file_path = os.path.join("_logs", "trade_executor_status.json")
-        self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
-        self.managed_positions = self._load_managed_positions()
+        # --- REVERTED: Removed asset_meta loading ---
+        # self.asset_meta = self._load_asset_metadata()
+
+        # --- NEW: State management logic ---
+        self.opened_positions_file = os.path.join("_data", "opened_positions.json")
+        self.opened_positions = self._load_opened_positions()
+
+        logging.info(f"Trade Executor started. Loaded {len(self.opened_positions)} positions.")

     def _load_agents(self) -> dict:
-        """Discovers and initializes agents from environment variables."""
+        # ... (omitted for brevity, this logic is correct and unchanged) ...
         exchanges = {}
         logging.info("Discovering agents from environment variables...")
         for env_var, private_key in os.environ.items():
@ -72,129 +71,123 @@ class TradeExecutor:
                 logging.error(f"Failed to initialize agent '{agent_name}': {e}")
         return exchanges

-    def _load_managed_positions(self) -> dict:
-        """Loads the state of which strategy manages which position."""
-        if os.path.exists(self.managed_positions_path):
-            try:
-                with open(self.managed_positions_path, 'r') as f:
-                    logging.info("Loading existing managed positions state.")
-                    return json.load(f)
-            except (IOError, json.JSONDecodeError):
-                logging.warning("Could not read managed positions file. Starting fresh.")
-        return {}
+    # --- REVERTED: Removed asset metadata loading ---
+    # def _load_asset_metadata(self) -> dict: ...
+
+    # --- NEW: Position state save/load methods ---
+    def _load_opened_positions(self) -> dict:
+        """Loads the state of currently managed positions from a JSON file."""
+        if not os.path.exists(self.opened_positions_file):
+            return {}
+        try:
+            with open(self.opened_positions_file, 'r', encoding='utf-8') as f:
+                return json.load(f)
+        except (json.JSONDecodeError, IOError) as e:
+            logging.error(f"Failed to read '{self.opened_positions_file}': {e}. Starting with empty state.", exc_info=True)
+            return {}

-    def _save_managed_positions(self):
-        """Saves the current state of managed positions."""
-        try:
-            with open(self.managed_positions_path, 'w') as f:
-                json.dump(self.managed_positions, f, indent=4)
-        except IOError as e:
-            logging.error(f"Failed to save managed positions state: {e}")
+    def _save_opened_positions(self):
+        """Saves the current state of managed positions to a JSON file."""
+        try:
+            with open(self.opened_positions_file, 'w', encoding='utf-8') as f:
+                json.dump(self.opened_positions, f, indent=4)
+            logging.debug(f"Successfully saved {len(self.opened_positions)} positions to '{self.opened_positions_file}'")
+        except IOError as e:
+            logging.error(f"Failed to write to '{self.opened_positions_file}': {e}", exc_info=True)

-    def _save_executor_status(self, perpetuals_state, spot_state, all_market_contexts):
-        """Saves the current balances and open positions to a live status file."""
-        # This function is correct and does not need changes.
-        pass
+    # --- REVERTED: Removed tick rounding function ---
+    # def _round_to_tick(self, price, tick_size): ...

     def run(self):
-        """The main execution loop with advanced position management."""
-        logging.info("Starting Trade Executor loop...")
+        """
+        Main execution loop. Waits for an order and updates state on success.
+        """
+        logging.info("Trade Executor started. Waiting for orders...")
         while True:
             try:
-                perpetuals_state = self.info.user_state(self.vault_address)
-                open_positions_api = {pos['position'].get('coin'): pos['position'] for pos in perpetuals_state.get('assetPositions', []) if float(pos.get('position', {}).get('szi', 0)) != 0}
-
-                for name, config in self.strategy_configs.items():
-                    coin = config['parameters'].get('coin')
-                    size = config['parameters'].get('size')
-                    # --- ADDED: Load leverage parameters from config ---
-                    leverage_long = config['parameters'].get('leverage_long')
-                    leverage_short = config['parameters'].get('leverage_short')
-
-                    status_file = os.path.join("_data", f"strategy_status_{name}.json")
-                    if not os.path.exists(status_file): continue
-                    with open(status_file, 'r') as f: status = json.load(f)
-
-                    desired_signal = status.get('current_signal')
-                    current_position = self.managed_positions.get(name)
-                    agent_name = config.get("agent", "default").lower()
-                    exchange_to_use = self.exchanges.get(agent_name)
-                    if not exchange_to_use:
-                        logging.error(f"[{name}] Agent '{agent_name}' not found. Skipping trade.")
-                        continue
-
-                    # --- State Machine Logic with Configurable Leverage ---
-                    if desired_signal == "BUY":
-                        if not current_position:
-                            if not all([size, leverage_long]):
-                                logging.error(f"[{name}] 'size' or 'leverage_long' not defined. Skipping.")
-                                continue
-                            logging.warning(f"[{name}] ACTION: Open LONG for {coin} with {leverage_long}x leverage.")
-                            exchange_to_use.update_leverage(int(leverage_long), coin)
-                            exchange_to_use.market_open(coin, True, size, None, 0.01)
-                            self.managed_positions[name] = {"coin": coin, "side": "long", "size": size}
-                            log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=status.get('signal_price', 0), size=size, signal=desired_signal)
-                        elif current_position['side'] == 'short':
-                            if not all([size, leverage_long]):
-                                logging.error(f"[{name}] 'size' or 'leverage_long' not defined. Skipping.")
-                                continue
-                            logging.warning(f"[{name}] ACTION: Close SHORT and open LONG for {coin} with {leverage_long}x leverage.")
-                            exchange_to_use.update_leverage(int(leverage_long), coin)
-                            exchange_to_use.market_open(coin, True, current_position['size'] + size, None, 0.01)
-                            self.managed_positions[name] = {"coin": coin, "side": "long", "size": size}
-                            log_trade(strategy=name, coin=coin, action="CLOSE_SHORT_&_REVERSE", price=status.get('signal_price', 0), size=size, signal=desired_signal)
-                    elif desired_signal == "SELL":
-                        if not current_position:
-                            if not all([size, leverage_short]):
-                                logging.error(f"[{name}] 'size' or 'leverage_short' not defined. Skipping.")
-                                continue
-                            logging.warning(f"[{name}] ACTION: Open SHORT for {coin} with {leverage_short}x leverage.")
-                            exchange_to_use.update_leverage(int(leverage_short), coin)
-                            exchange_to_use.market_open(coin, False, size, None, 0.01)
-                            self.managed_positions[name] = {"coin": coin, "side": "short", "size": size}
-                            log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=status.get('signal_price', 0), size=size, signal=desired_signal)
-                        elif current_position['side'] == 'long':
-                            if not all([size, leverage_short]):
-                                logging.error(f"[{name}] 'size' or 'leverage_short' not defined. Skipping.")
-                                continue
-                            logging.warning(f"[{name}] ACTION: Close LONG and open SHORT for {coin} with {leverage_short}x leverage.")
-                            exchange_to_use.update_leverage(int(leverage_short), coin)
-                            exchange_to_use.market_open(coin, False, current_position['size'] + size, None, 0.01)
-                            self.managed_positions[name] = {"coin": coin, "side": "short", "size": size}
-                            log_trade(strategy=name, coin=coin, action="CLOSE_LONG_&_REVERSE", price=status.get('signal_price', 0), size=size, signal=desired_signal)
-                    elif desired_signal == "FLAT":
-                        if current_position:
-                            logging.warning(f"[{name}] ACTION: Close {current_position['side']} position for {coin}.")
-                            is_buy = current_position['side'] == 'short'
-                            exchange_to_use.market_open(coin, is_buy, current_position['size'], None, 0.01)
-                            del self.managed_positions[name]
-                            log_trade(strategy=name, coin=coin, action=f"CLOSE_{current_position['side'].upper()}", price=status.get('signal_price', 0), size=current_position['size'], signal=desired_signal)
-
-                    self._save_managed_positions()
+                order = self.order_execution_queue.get()
+                if not order:
+                    continue
+                logging.info(f"Received order: {order}")
+
+                agent_name = order['agent']
+                action = order['action']
+                coin = order['coin']
+                is_buy = order['is_buy']
+                size = order['size']
+                limit_px = order.get('limit_px')
+
+                exchange_to_use = self.exchanges.get(agent_name)
+                if not exchange_to_use:
+                    logging.error(f"Agent '{agent_name}' not found. Skipping order.")
+                    continue
+
+                response = None
+                if action == "market_open" or action == "market_close":
+                    reduce_only = (action == "market_close")
+                    log_action = "MARKET CLOSE" if reduce_only else "MARKET OPEN"
+                    logging.warning(f"ACTION: {log_action} {coin} {'BUY' if is_buy else 'SELL'} {size}")
+
+                    # --- REVERTED: Removed all slippage and rounding logic ---
+                    # The raw limit_px from the order is now used directly
+                    final_price = limit_px
+                    logging.info(f"[{agent_name}] Using raw price for {coin}: {final_price}")
+                    order_type = {"limit": {"tif": "Ioc"}}
+                    # --- REVERTED: Uses final_price (which is just limit_px) ---
+                    response = exchange_to_use.order(coin, is_buy, size, final_price, order_type, reduce_only=reduce_only)
+                    logging.info(f"Market order response: {response}")
+
+                    # --- NEW: STATE UPDATE ON SUCCESS ---
+                    if response.get("status") == "ok":
+                        response_data = response.get("response", {}).get("data", {})
+                        if response_data and "statuses" in response_data:
+                            # Check if the order status contains an error
+                            if "error" not in response_data["statuses"][0]:
+                                position_key = order['position_key']
+                                if action == "market_open":
+                                    # Add to state
+                                    self.opened_positions[position_key] = {
+                                        "strategy": order['strategy'],
+                                        "coin": coin,
+                                        "side": "long" if is_buy else "short",
+                                        "open_time_utc": order['open_time_utc'],
+                                        "open_price": order['open_price'],
+                                        "amount": order['amount'],
+                                        # --- MODIFIED: Read leverage from the order ---
+                                        "leverage": order.get('leverage')
+                                    }
+                                    logging.info(f"Successfully opened position {position_key}. Saving state.")
+                                elif action == "market_close":
+                                    # Remove from state
+                                    if position_key in self.opened_positions:
+                                        del self.opened_positions[position_key]
+                                        logging.info(f"Successfully closed position {position_key}. Saving state.")
+                                    else:
+                                        logging.warning(f"Received close confirmation for {position_key}, but it was not in state.")
+                                self._save_opened_positions()  # Save state to disk
+                            else:
+                                logging.error(f"API Error for {action}: {response_data['statuses'][0]['error']}")
+                        else:
+                            logging.error(f"Unexpected API response format: {response}")
+                    else:
+                        logging.error(f"API call failed, status: {response.get('status')}")
+
+                elif action == "update_leverage":
+                    leverage = int(size)
+                    logging.warning(f"ACTION: UPDATE LEVERAGE {coin} to {leverage}x")
+                    response = exchange_to_use.update_leverage(leverage, coin)
+                    logging.info(f"Update leverage response: {response}")
+
+                else:
+                    logging.warning(f"Received unknown action: {action}")

             except Exception as e:
-                logging.error(f"An error occurred in the main executor loop: {e}")
-            time.sleep(15)
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(description="Run the Trade Executor.")
-    parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
-    args = parser.parse_args()
-
-    executor = TradeExecutor(log_level=args.log_level)
-    try:
-        executor.run()
-    except KeyboardInterrupt:
-        logging.info("Trade Executor stopped.")
+                logging.error(f"An error occurred in the main executor loop: {e}", exc_info=True)
+                time.sleep(1)

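The rewritten executor is a queue consumer that persists state after every confirmed fill. A minimal sketch of that pattern in isolation, assuming the same order-dict shape; the file name is illustrative, and the atomic temp-file swap mirrors the save logic in whale_tracker.py below:

import json
import multiprocessing
import os

STATE_FILE = "opened_positions.example.json"  # illustrative path

def save_state(state: dict) -> None:
    # Write to a temp file, then atomically swap it in, so readers never see a partial file.
    tmp = STATE_FILE + ".tmp"
    with open(tmp, "w", encoding="utf-8") as f:
        json.dump(state, f, indent=4)
    os.replace(tmp, STATE_FILE)

def consume(queue) -> None:
    state = {}
    while True:
        order = queue.get()
        if order is None:  # sentinel to shut down
            break
        key = order["position_key"]
        if order["action"] == "market_open":
            state[key] = {"coin": order["coin"], "side": "long" if order["is_buy"] else "short"}
        elif order["action"] == "market_close":
            state.pop(key, None)
        save_state(state)  # persist after every confirmed change

if __name__ == "__main__":
    q = multiprocessing.Queue()
    q.put({"position_key": "demo_BTC", "action": "market_open", "coin": "BTC", "is_buy": True})
    q.put(None)
    consume(q)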
652
wallet_data.py Normal file
View File

@ -0,0 +1,652 @@
#!/usr/bin/env python3
"""
Hyperliquid Wallet Data Fetcher - FINAL Perfect Alignment
==========================================================
Complete Python script to pull all available data for a Hyperliquid wallet via API.
Requirements:
pip install hyperliquid-python-sdk
Usage:
    python wallet_data.py <wallet_address>
Example:
    python wallet_data.py 0xcd5051944f780a621ee62e39e493c489668acf4d
"""
import sys
import json
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
from hyperliquid.info import Info
from hyperliquid.utils import constants
class HyperliquidWalletAnalyzer:
"""
Comprehensive wallet data analyzer for Hyperliquid exchange.
Fetches all available information about a specific wallet address.
"""
def __init__(self, wallet_address: str, use_testnet: bool = False):
"""
Initialize the analyzer with a wallet address.
Args:
wallet_address: Ethereum-style address (0x...)
use_testnet: If True, use testnet instead of mainnet
"""
self.wallet_address = wallet_address
api_url = constants.TESTNET_API_URL if use_testnet else constants.MAINNET_API_URL
# Initialize Info API (read-only, no private keys needed)
self.info = Info(api_url, skip_ws=True)
print(f"Initialized Hyperliquid API: {'Testnet' if use_testnet else 'Mainnet'}")
print(f"Target wallet: {wallet_address}\n")
def print_position_details(self, position: Dict[str, Any], index: int):
"""
Print detailed information about a single position.
Args:
position: Position data dictionary
index: Position number for display
"""
pos = position.get('position', {})
# Extract all position details
coin = pos.get('coin', 'Unknown')
size = float(pos.get('szi', 0))
entry_px = float(pos.get('entryPx', 0))
position_value = float(pos.get('positionValue', 0))
unrealized_pnl = float(pos.get('unrealizedPnl', 0))
return_on_equity = float(pos.get('returnOnEquity', 0))
# Leverage details
leverage = pos.get('leverage', {})
leverage_type = leverage.get('type', 'unknown') if isinstance(leverage, dict) else 'cross'
leverage_value = leverage.get('value', 0) if isinstance(leverage, dict) else 0
# Margin and liquidation
margin_used = float(pos.get('marginUsed', 0))
liquidation_px = pos.get('liquidationPx')
max_trade_szs = pos.get('maxTradeSzs', [0, 0])
# Cumulative funding
cumulative_funding = float(pos.get('cumFunding', {}).get('allTime', 0))
# Determine if long or short
side = "LONG 📈" if size > 0 else "SHORT 📉"
side_color = "🟢" if size > 0 else "🔴"
# PnL color
pnl_symbol = "🟢" if unrealized_pnl >= 0 else "🔴"
pnl_sign = "+" if unrealized_pnl >= 0 else ""
# ROE color
roe_symbol = "🟢" if return_on_equity >= 0 else "🔴"
roe_sign = "+" if return_on_equity >= 0 else ""
print(f"\n{'='*80}")
print(f"POSITION #{index}: {coin} {side} {side_color}")
print(f"{'='*80}")
print(f"\n📊 POSITION DETAILS:")
print(f" Size: {abs(size):.6f} {coin}")
print(f" Side: {side}")
print(f" Entry Price: ${entry_px:,.4f}")
print(f" Position Value: ${abs(position_value):,.2f}")
print(f"\n💰 PROFITABILITY:")
print(f" Unrealized PnL: {pnl_symbol} {pnl_sign}${unrealized_pnl:,.2f}")
print(f" Return on Equity: {roe_symbol} {roe_sign}{return_on_equity:.2%}")
print(f" Cumulative Funding: ${cumulative_funding:,.4f}")
print(f"\n⚙️ LEVERAGE & MARGIN:")
print(f" Leverage Type: {leverage_type.upper()}")
print(f" Leverage: {leverage_value}x")
print(f" Margin Used: ${margin_used:,.2f}")
print(f"\n⚠️ RISK MANAGEMENT:")
if liquidation_px:
liquidation_px_float = float(liquidation_px) if liquidation_px else 0
print(f" Liquidation Price: ${liquidation_px_float:,.4f}")
# Calculate distance to liquidation
if entry_px > 0 and liquidation_px_float > 0:
if size > 0: # Long position
distance = ((entry_px - liquidation_px_float) / entry_px) * 100
else: # Short position
distance = ((liquidation_px_float - entry_px) / entry_px) * 100
distance_symbol = "🟢" if abs(distance) > 20 else "🟡" if abs(distance) > 10 else "🔴"
print(f" Distance to Liq: {distance_symbol} {abs(distance):.2f}%")
else:
print(f" Liquidation Price: N/A (Cross margin)")
if max_trade_szs and len(max_trade_szs) == 2:
print(f" Max Long Trade: {max_trade_szs[0]}")
print(f" Max Short Trade: {max_trade_szs[1]}")
print(f"\n{'='*80}")
def get_user_state(self) -> Dict[str, Any]:
"""
Get complete user state including positions and margin summary.
Returns:
Dict containing:
- assetPositions: List of open perpetual positions
- marginSummary: Account value, margin used, withdrawable
- crossMarginSummary: Cross margin details
- withdrawable: Available balance to withdraw
"""
print("📊 Fetching User State (Perpetuals)...")
try:
data = self.info.user_state(self.wallet_address)
if data:
margin_summary = data.get('marginSummary', {})
positions = data.get('assetPositions', [])
account_value = float(margin_summary.get('accountValue', 0))
total_margin_used = float(margin_summary.get('totalMarginUsed', 0))
total_ntl_pos = float(margin_summary.get('totalNtlPos', 0))
total_raw_usd = float(margin_summary.get('totalRawUsd', 0))
withdrawable = float(data.get('withdrawable', 0))
print(f" ✓ Account Value: ${account_value:,.2f}")
print(f" ✓ Total Margin Used: ${total_margin_used:,.2f}")
print(f" ✓ Total Position Value: ${total_ntl_pos:,.2f}")
print(f" ✓ Withdrawable: ${withdrawable:,.2f}")
print(f" ✓ Open Positions: {len(positions)}")
# Calculate margin utilization
if account_value > 0:
margin_util = (total_margin_used / account_value) * 100
util_symbol = "🟢" if margin_util < 50 else "🟡" if margin_util < 75 else "🔴"
print(f" ✓ Margin Utilization: {util_symbol} {margin_util:.2f}%")
# Print detailed information for each position
if positions:
print(f"\n{'='*80}")
print(f"DETAILED POSITION BREAKDOWN ({len(positions)} positions)")
print(f"{'='*80}")
for idx, position in enumerate(positions, 1):
self.print_position_details(position, idx)
# Summary table with perfect alignment
self.print_positions_summary_table(positions)
else:
print(" ⚠ No perpetual positions found")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return {}
def print_positions_summary_table(self, positions: list):
"""
Print a summary table of all positions with perfectly aligned columns.
NO emojis in data cells - keeps them simple text only for perfect alignment.
Args:
positions: List of position dictionaries
"""
print(f"\n{'='*130}")
print("POSITIONS SUMMARY TABLE")
print('='*130)
# Print header
print("| Asset | Side | Size | Entry Price | Position Value | Unrealized PnL | ROE | Leverage |")
print("|----------|-------|-------------------|-------------------|-------------------|-------------------|------------|------------|")
total_position_value = 0
total_pnl = 0
for position in positions:
pos = position.get('position', {})
coin = pos.get('coin', 'Unknown')
size = float(pos.get('szi', 0))
entry_px = float(pos.get('entryPx', 0))
position_value = float(pos.get('positionValue', 0))
unrealized_pnl = float(pos.get('unrealizedPnl', 0))
return_on_equity = float(pos.get('returnOnEquity', 0))
# Get leverage
leverage = pos.get('leverage', {})
leverage_value = leverage.get('value', 0) if isinstance(leverage, dict) else 0
leverage_type = leverage.get('type', 'cross') if isinstance(leverage, dict) else 'cross'
# Determine side - NO EMOJIS in data
side_text = "LONG" if size > 0 else "SHORT"
# Format PnL and ROE with signs
pnl_sign = "+" if unrealized_pnl >= 0 else ""
roe_sign = "+" if return_on_equity >= 0 else ""
# Accumulate totals
total_position_value += abs(position_value)
total_pnl += unrealized_pnl
# Format all values as strings with proper width
asset_str = f"{coin[:8]:<8}"
side_str = f"{side_text:<5}"
size_str = f"{abs(size):>17,.4f}"
entry_str = f"${entry_px:>16,.2f}"
value_str = f"${abs(position_value):>16,.2f}"
pnl_str = f"{pnl_sign}${unrealized_pnl:>15,.2f}"
roe_str = f"{roe_sign}{return_on_equity:>9.2%}"
lev_str = f"{leverage_value}x {leverage_type[:4]}"
# Print row with exact spacing
print(f"| {asset_str} | {side_str} | {size_str} | {entry_str} | {value_str} | {pnl_str} | {roe_str} | {lev_str:<10} |")
# Separator before totals
print("|==========|=======|===================|===================|===================|===================|============|============|")
# Total row
total_value_str = f"${total_position_value:>16,.2f}"
total_pnl_sign = "+" if total_pnl >= 0 else ""
total_pnl_str = f"{total_pnl_sign}${total_pnl:>15,.2f}"
print(f"| TOTAL | | | | {total_value_str} | {total_pnl_str} | | |")
print('='*130 + '\n')
def get_spot_state(self) -> Dict[str, Any]:
"""
Get spot trading state including token balances.
Returns:
Dict containing:
- balances: List of spot token holdings
"""
print("\n💰 Fetching Spot State...")
try:
data = self.info.spot_user_state(self.wallet_address)
if data and data.get('balances'):
print(f" ✓ Spot Holdings: {len(data['balances'])} tokens")
for balance in data['balances'][:5]: # Show first 5
print(f" - {balance.get('coin', 'Unknown')}: {balance.get('total', 0)}")
else:
print(" ⚠ No spot holdings found")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return {}
def get_open_orders(self) -> list:
"""
Get all open orders for the user.
Returns:
List of open orders with details (price, size, side, etc.)
"""
print("\n📋 Fetching Open Orders...")
try:
data = self.info.open_orders(self.wallet_address)
if data:
print(f" ✓ Open Orders: {len(data)}")
for order in data[:3]: # Show first 3
coin = order.get('coin', 'Unknown')
side = order.get('side', 'Unknown')
size = order.get('sz', 0)
price = order.get('limitPx', 0)
print(f" - {coin} {side}: {size} @ ${price}")
else:
print(" ⚠ No open orders")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_user_fills(self, limit: int = 100) -> list:
"""
Get recent trade fills (executions).
Args:
limit: Maximum number of fills to retrieve (max 2000)
Returns:
List of fills with execution details, PnL, timestamps
"""
print(f"\n📈 Fetching Recent Fills (last {limit})...")
try:
data = self.info.user_fills(self.wallet_address)
if data:
fills = data[:limit]
print(f" ✓ Total Fills Retrieved: {len(fills)}")
# Show summary stats
total_pnl = sum(float(f.get('closedPnl', 0)) for f in fills if f.get('closedPnl'))
print(f" ✓ Total Closed PnL: ${total_pnl:.2f}")
# Show most recent
if fills:
recent = fills[0]
print(f" ✓ Most Recent: {recent.get('coin')} {recent.get('side')} {recent.get('sz')} @ ${recent.get('px')}")
else:
print(" ⚠ No fills found")
return data[:limit] if data else []
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_user_fills_by_time(self, start_time: Optional[int] = None,
end_time: Optional[int] = None) -> list:
"""
Get fills within a specific time range.
Args:
start_time: Start timestamp in milliseconds (default: 7 days ago)
end_time: End timestamp in milliseconds (default: now)
Returns:
List of fills within the time range
"""
if not start_time:
start_time = int((datetime.now() - timedelta(days=7)).timestamp() * 1000)
if not end_time:
end_time = int(datetime.now().timestamp() * 1000)
print(f"\n📅 Fetching Fills by Time Range...")
print(f" From: {datetime.fromtimestamp(start_time/1000)}")
print(f" To: {datetime.fromtimestamp(end_time/1000)}")
try:
data = self.info.user_fills_by_time(self.wallet_address, start_time, end_time)
if data:
print(f" ✓ Fills in Range: {len(data)}")
else:
print(" ⚠ No fills in this time range")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_user_fees(self) -> Dict[str, Any]:
"""
Get user's fee schedule and trading volume.
Returns:
Dict containing:
- feeSchedule: Fee rates by tier
- userCrossRate: User's current cross trading fee rate
- userAddRate: User's maker fee rate
- userWithdrawRate: Withdrawal fee rate
- dailyUserVlm: Daily trading volume
"""
print("\n💳 Fetching Fee Information...")
try:
data = self.info.user_fees(self.wallet_address)
if data:
print(f" ✓ Maker Fee: {data.get('userAddRate', 0)}%")
print(f" ✓ Taker Fee: {data.get('userCrossRate', 0)}%")
print(f" ✓ Daily Volume: ${data.get('dailyUserVlm', [0])[0] if data.get('dailyUserVlm') else 0}")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return {}
def get_user_rate_limit(self) -> Dict[str, Any]:
"""
Get API rate limit information.
Returns:
Dict containing:
- cumVlm: Cumulative trading volume
- nRequestsUsed: Number of requests used
- nRequestsCap: Request capacity
"""
print("\n⏱️ Fetching Rate Limit Info...")
try:
data = self.info.user_rate_limit(self.wallet_address)
if data:
used = data.get('nRequestsUsed', 0)
cap = data.get('nRequestsCap', 0)
print(f" ✓ API Requests: {used}/{cap}")
print(f" ✓ Cumulative Volume: ${data.get('cumVlm', 0)}")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return {}
def get_funding_history(self, coin: str, days: int = 7) -> list:
"""
Get funding rate history for a specific coin.
Args:
coin: Asset symbol (e.g., 'BTC', 'ETH')
days: Number of days of history (default: 7)
Returns:
List of funding rate entries
"""
end_time = int(datetime.now().timestamp() * 1000)
start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)
print(f"\n📊 Fetching Funding History for {coin}...")
try:
data = self.info.funding_history(coin, start_time, end_time)
if data:
print(f" ✓ Funding Entries: {len(data)}")
if data:
latest = data[-1]
print(f" ✓ Latest Rate: {latest.get('fundingRate', 0)}")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_user_funding_history(self, days: int = 7) -> list:
"""
Get user's funding payments history.
Args:
days: Number of days of history (default: 7)
Returns:
List of funding payments
"""
end_time = int(datetime.now().timestamp() * 1000)
start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)
print(f"\n💸 Fetching User Funding Payments (last {days} days)...")
try:
data = self.info.user_funding_history(self.wallet_address, start_time, end_time)
if data:
print(f" ✓ Funding Payments: {len(data)}")
total_funding = sum(float(f.get('usdc', 0)) for f in data)
print(f" ✓ Total Funding P&L: ${total_funding:.2f}")
else:
print(" ⚠ No funding payments found")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_user_non_funding_ledger_updates(self, days: int = 7) -> list:
"""
Get non-funding ledger updates (deposits, withdrawals, liquidations).
Args:
days: Number of days of history (default: 7)
Returns:
List of ledger updates
"""
end_time = int(datetime.now().timestamp() * 1000)
start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)
print(f"\n📒 Fetching Ledger Updates (last {days} days)...")
try:
data = self.info.user_non_funding_ledger_updates(self.wallet_address, start_time, end_time)
if data:
print(f" ✓ Ledger Updates: {len(data)}")
# Categorize updates
deposits = [u for u in data if 'deposit' in str(u.get('delta', {})).lower()]
withdrawals = [u for u in data if 'withdraw' in str(u.get('delta', {})).lower()]
print(f" ✓ Deposits: {len(deposits)}, Withdrawals: {len(withdrawals)}")
else:
print(" ⚠ No ledger updates found")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def get_referral_state(self) -> Dict[str, Any]:
"""
Get referral program state for the user.
Returns:
Dict with referral status and earnings
"""
print("\n🎁 Fetching Referral State...")
try:
data = self.info.query_referral_state(self.wallet_address)
if data:
print(f" ✓ Referral Code: {data.get('referralCode', 'N/A')}")
print(f" ✓ Referees: {len(data.get('referees', []))}")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return {}
def get_sub_accounts(self) -> list:
"""
Get list of sub-accounts for the user.
Returns:
List of sub-account addresses
"""
print("\n👥 Fetching Sub-Accounts...")
try:
data = self.info.query_sub_accounts(self.wallet_address)
if data:
print(f" ✓ Sub-Accounts: {len(data)}")
else:
print(" ⚠ No sub-accounts found")
return data
except Exception as e:
print(f" ✗ Error: {e}")
return []
def fetch_all_data(self, save_to_file: bool = True) -> Dict[str, Any]:
"""
Fetch all available data for the wallet.
Args:
save_to_file: If True, save results to JSON file
Returns:
Dict containing all fetched data
"""
print("=" * 80)
print("HYPERLIQUID WALLET DATA FETCHER")
print("=" * 80)
all_data = {
'wallet_address': self.wallet_address,
'timestamp': datetime.now().isoformat(),
'data': {}
}
# Fetch all data sections
all_data['data']['user_state'] = self.get_user_state()
all_data['data']['spot_state'] = self.get_spot_state()
all_data['data']['open_orders'] = self.get_open_orders()
all_data['data']['recent_fills'] = self.get_user_fills(limit=50)
all_data['data']['fills_last_7_days'] = self.get_user_fills_by_time()
all_data['data']['user_fees'] = self.get_user_fees()
all_data['data']['rate_limit'] = self.get_user_rate_limit()
all_data['data']['funding_payments'] = self.get_user_funding_history(days=7)
all_data['data']['ledger_updates'] = self.get_user_non_funding_ledger_updates(days=7)
all_data['data']['referral_state'] = self.get_referral_state()
all_data['data']['sub_accounts'] = self.get_sub_accounts()
# Optional: Fetch funding history for positions
user_state = all_data['data']['user_state']
if user_state and user_state.get('assetPositions'):
all_data['data']['funding_history'] = {}
for position in user_state['assetPositions'][:3]: # First 3 positions
coin = position.get('position', {}).get('coin')
if coin:
all_data['data']['funding_history'][coin] = self.get_funding_history(coin, days=7)
print("\n" + "=" * 80)
print("DATA COLLECTION COMPLETE")
print("=" * 80)
# Save to file
if save_to_file:
filename = f"hyperliquid_wallet_data_{self.wallet_address[:10]}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
with open(filename, 'w') as f:
json.dump(all_data, f, indent=2, default=str)
print(f"\n💾 Data saved to: {filename}")
return all_data
def main():
"""Main execution function."""
if len(sys.argv) < 2:
print("Usage: python hyperliquid_wallet_data.py <wallet_address> [--testnet]")
print("\nExample:")
print(" python hyperliquid_wallet_data.py 0xcd5051944f780a621ee62e39e493c489668acf4d")
sys.exit(1)
wallet_address = sys.argv[1]
use_testnet = '--testnet' in sys.argv
# Validate wallet address format
if not wallet_address.startswith('0x') or len(wallet_address) != 42:
print("❌ Error: Invalid wallet address format")
print(" Address must be in format: 0x followed by 40 hexadecimal characters")
sys.exit(1)
try:
analyzer = HyperliquidWalletAnalyzer(wallet_address, use_testnet=use_testnet)
data = analyzer.fetch_all_data(save_to_file=True)
print("\n✅ All data fetched successfully!")
print(f"\n📊 Summary:")
print(f" - Account Value: ${data['data']['user_state'].get('marginSummary', {}).get('accountValue', 0)}")
print(f" - Open Positions: {len(data['data']['user_state'].get('assetPositions', []))}")
print(f" - Spot Holdings: {len(data['data']['spot_state'].get('balances', []))}")
print(f" - Open Orders: {len(data['data']['open_orders'])}")
print(f" - Recent Fills: {len(data['data']['recent_fills'])}")
except Exception as e:
print(f"\n❌ Fatal Error: {e}")
import traceback
traceback.print_exc()
sys.exit(1)
if __name__ == "__main__":
main()

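Besides the CLI entry point, the analyzer can be used programmatically; a short sketch (the zero address is a placeholder):

from wallet_data import HyperliquidWalletAnalyzer

analyzer = HyperliquidWalletAnalyzer("0x0000000000000000000000000000000000000000")
state = analyzer.get_user_state()    # positions + margin summary
orders = analyzer.get_open_orders()  # resting orders
# Or pull everything at once without writing the JSON dump:
all_data = analyzer.fetch_all_data(save_to_file=False)
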
367
whale_tracker.py Normal file
View File

@ -0,0 +1,367 @@
import json
import os
import time
import requests
import logging
import argparse
import sys
from datetime import datetime, timedelta
# --- Configuration ---
# !! IMPORTANT: Update this to your actual Hyperliquid API endpoint !!
API_ENDPOINT = "https://api.hyperliquid.xyz/info"
INPUT_FILE = os.path.join("_data", "wallets_to_track.json")
OUTPUT_FILE = os.path.join("_data", "wallets_info.json")
LOGS_DIR = "_logs"
LOG_FILE = os.path.join(LOGS_DIR, "whale_tracker.log")
# Polling intervals (in seconds)
POLL_INTERVALS = {
'core_data': 10, # 5-15s range
'open_orders': 20, # 15-30s range
'account_metrics': 180, # 1-5m range
'ledger_updates': 600, # 5-15m range
'save_data': 5, # How often to write to wallets_info.json
'reload_wallets': 60 # Check for wallet list changes every 60s
}
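
# The tracker expects INPUT_FILE to be a JSON list of {"name": ..., "address": ...}
# objects (inferred from WalletTracker below). A minimal example of
# _data/wallets_to_track.json, with placeholder addresses:
#
# [
#     {"name": "whale_1", "address": "0x0000000000000000000000000000000000000001"},
#     {"name": "whale_2", "address": "0x0000000000000000000000000000000000000002"}
# ]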
class HyperliquidAPI:
"""
Client to handle POST requests to the Hyperliquid info endpoint.
"""
def __init__(self, base_url):
self.base_url = base_url
self.session = requests.Session()
logging.info(f"API Client initialized for endpoint: {base_url}")
def post_request(self, payload):
"""
Internal helper to send POST requests and handle errors.
"""
try:
response = self.session.post(self.base_url, json=payload, timeout=10)
response.raise_for_status() # Raise an exception for bad status codes (4xx or 5xx)
return response.json()
except requests.exceptions.HTTPError as e:
logging.error(f"HTTP Error: {e.response.status_code} for {e.request.url}. Response: {e.response.text}")
except requests.exceptions.ConnectionError as e:
logging.error(f"Connection Error: {e}")
except requests.exceptions.Timeout:
logging.error(f"Request timed out for payload: {payload.get('type')}")
except json.JSONDecodeError:
logging.error(f"Failed to decode JSON response. Response text: {response.text if 'response' in locals() else 'No response text'}")
except Exception as e:
logging.error(f"An unexpected error occurred in post_request: {e}", exc_info=True)
return None
def get_user_state(self, user_address: str):
payload = {"type": "clearinghouseState", "user": user_address}
return self.post_request(payload)
def get_open_orders(self, user_address: str):
payload = {"type": "openOrders", "user": user_address}
return self.post_request(payload)
def get_user_rate_limit(self, user_address: str):
payload = {"type": "userRateLimit", "user": user_address}
return self.post_request(payload)
def get_user_ledger_updates(self, user_address: str, start_time_ms: int, end_time_ms: int):
payload = {
"type": "userNonFundingLedgerUpdates",
"user": user_address,
"startTime": start_time_ms,
"endTime": end_time_ms
}
return self.post_request(payload)
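
# Example of using the client stand-alone (illustrative; the address is a placeholder):
#
#   api = HyperliquidAPI(API_ENDPOINT)
#   state = api.get_user_state("0x0000000000000000000000000000000000000001")
#   if state:
#       print(state.get("crossMarginSummary", {}).get("accountValue"))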
class WalletTracker:
"""
Main class to track wallets, process data, and store results.
"""
def __init__(self, api_client, wallets_to_track):
self.api = api_client
self.wallets = wallets_to_track # This is the list of dicts
self.wallets_by_name = {w['name']: w for w in self.wallets}
self.wallets_data = {
wallet['name']: {"address": wallet['address']} for wallet in self.wallets
}
logging.info(f"WalletTracker initialized for {len(self.wallets)} wallets.")
def reload_wallets(self):
"""
Checks the INPUT_FILE for changes and updates the tracked wallet list.
"""
logging.debug("Reloading wallet list...")
try:
with open(INPUT_FILE, 'r') as f:
new_wallets_list = json.load(f)
if not isinstance(new_wallets_list, list):
logging.warning(f"Failed to reload '{INPUT_FILE}': content is not a list.")
return
new_wallets_by_name = {w['name']: w for w in new_wallets_list}
old_names = set(self.wallets_by_name.keys())
new_names = set(new_wallets_by_name.keys())
added_names = new_names - old_names
removed_names = old_names - new_names
if not added_names and not removed_names:
logging.debug("Wallet list is unchanged.")
return # No changes
# Update internal wallet list
self.wallets = new_wallets_list
self.wallets_by_name = new_wallets_by_name
# Add new wallets to wallets_data
for name in added_names:
self.wallets_data[name] = {"address": self.wallets_by_name[name]['address']}
logging.info(f"Added new wallet to track: {name}")
# Remove old wallets from wallets_data
for name in removed_names:
if name in self.wallets_data:
del self.wallets_data[name]
logging.info(f"Removed wallet from tracking: {name}")
logging.info(f"Wallet list reloaded. Tracking {len(self.wallets)} wallets.")
except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
logging.error(f"Failed to reload and parse '{INPUT_FILE}': {e}")
except Exception as e:
logging.error(f"Unexpected error during wallet reload: {e}", exc_info=True)
def calculate_core_metrics(self, state_data: dict) -> dict:
"""
Performs calculations based on user_state data.
"""
if not state_data or 'crossMarginSummary' not in state_data:
logging.warning("Core state data is missing 'crossMarginSummary'.")
return {"raw_state": state_data}
summary = state_data['crossMarginSummary']
account_value = float(summary.get('accountValue', 0))
margin_used = float(summary.get('totalMarginUsed', 0))
# Calculations
margin_utilization = (margin_used / account_value) if account_value > 0 else 0
available_margin = account_value - margin_used
total_position_value = 0
if 'assetPositions' in state_data:
for pos in state_data.get('assetPositions', []):
try:
                    # Use 'positionValue' for the position's notional value
                    # (field name per clearinghouseState, as in wallet_data.py)
                    pos_value_str = pos.get('position', {}).get('positionValue', '0')
                    total_position_value += float(pos_value_str)
                except (ValueError, TypeError):
                    logging.warning(f"Could not parse position value: {pos.get('position', {}).get('positionValue')}")
continue
portfolio_leverage = (total_position_value / account_value) if account_value > 0 else 0
# Return calculated metrics alongside raw data
return {
"raw_state": state_data,
"account_value": account_value,
"margin_used": margin_used,
"margin_utilization": margin_utilization,
"available_margin": available_margin,
"total_position_value": total_position_value,
"portfolio_leverage": portfolio_leverage
}
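        # Worked example (illustrative numbers): accountValue = 10,000 and
        # totalMarginUsed = 2,500 give margin_utilization = 2500 / 10000 = 0.25 and
        # available_margin = 7,500; with total_position_value = 20,000 the
        # portfolio_leverage is 20000 / 10000 = 2.0x.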
def poll_core_data(self):
logging.debug("Polling Core Data...")
# Use self.wallets which is updated by reload_wallets
for wallet in self.wallets:
name = wallet['name']
address = wallet['address']
state_data = self.api.get_user_state(address)
if state_data:
calculated_data = self.calculate_core_metrics(state_data)
# Ensure wallet hasn't been removed by a concurrent reload
if name in self.wallets_data:
self.wallets_data[name]['core_state'] = calculated_data
time.sleep(0.1) # Avoid bursting requests
def poll_open_orders(self):
logging.debug("Polling Open Orders...")
for wallet in self.wallets:
name = wallet['name']
address = wallet['address']
orders_data = self.api.get_open_orders(address)
if orders_data:
# TODO: Add calculations for 'pending_margin_required' if logic is available
if name in self.wallets_data:
self.wallets_data[name]['open_orders'] = {"raw_orders": orders_data}
time.sleep(0.1)
def poll_account_metrics(self):
logging.debug("Polling Account Metrics...")
for wallet in self.wallets:
name = wallet['name']
address = wallet['address']
metrics_data = self.api.get_user_rate_limit(address)
if metrics_data:
if name in self.wallets_data:
self.wallets_data[name]['account_metrics'] = metrics_data
time.sleep(0.1)
def poll_ledger_updates(self):
logging.debug("Polling Ledger Updates...")
end_time_ms = int(datetime.now().timestamp() * 1000)
start_time_ms = int((datetime.now() - timedelta(minutes=15)).timestamp() * 1000)
for wallet in self.wallets:
name = wallet['name']
address = wallet['address']
ledger_data = self.api.get_user_ledger_updates(address, start_time_ms, end_time_ms)
if ledger_data:
if name in self.wallets_data:
self.wallets_data[name]['ledger_updates'] = ledger_data
time.sleep(0.1)
def save_data_to_json(self):
"""
Atomically writes the current wallet data to the output JSON file.
(No longer needs cleaning logic)
"""
logging.debug(f"Saving data to {OUTPUT_FILE}...")
temp_file = OUTPUT_FILE + ".tmp"
try:
# Save the data
with open(temp_file, 'w', encoding='utf-8') as f:
# self.wallets_data is automatically kept clean by reload_wallets
json.dump(self.wallets_data, f, indent=2)
# Atomic rename (move)
os.replace(temp_file, OUTPUT_FILE)
        except (IOError, TypeError) as e:
logging.error(f"Failed to write wallet data to file: {e}")
except Exception as e:
logging.error(f"An unexpected error occurred during file save: {e}")
if os.path.exists(temp_file):
os.remove(temp_file)
class WhaleTrackerRunner:
"""
Manages the polling loop using last-run timestamps instead of a complex scheduler.
"""
def __init__(self, api_client, wallets, shared_whale_data_dict=None): # Kept arg for compatibility
self.tracker = WalletTracker(api_client, wallets)
self.last_poll_times = {key: 0 for key in POLL_INTERVALS}
self.poll_intervals = POLL_INTERVALS
logging.info("WhaleTrackerRunner initialized to save to JSON file.")
    def update_shared_data(self):
        """
        Deprecated: the run loop now calls save_data_to_json() directly.
        Kept so older imports don't break; it simply delegates to the JSON save.
        """
        self.tracker.save_data_to_json()
def run(self):
logging.info("Starting main polling loop...")
while True:
try:
now = time.time()
if now - self.last_poll_times['reload_wallets'] > self.poll_intervals['reload_wallets']:
self.tracker.reload_wallets()
self.last_poll_times['reload_wallets'] = now
if now - self.last_poll_times['core_data'] > self.poll_intervals['core_data']:
self.tracker.poll_core_data()
self.last_poll_times['core_data'] = now
if now - self.last_poll_times['open_orders'] > self.poll_intervals['open_orders']:
self.tracker.poll_open_orders()
self.last_poll_times['open_orders'] = now
if now - self.last_poll_times['account_metrics'] > self.poll_intervals['account_metrics']:
self.tracker.poll_account_metrics()
self.last_poll_times['account_metrics'] = now
if now - self.last_poll_times['ledger_updates'] > self.poll_intervals['ledger_updates']:
self.tracker.poll_ledger_updates()
self.last_poll_times['ledger_updates'] = now
if now - self.last_poll_times['save_data'] > self.poll_intervals['save_data']:
self.tracker.save_data_to_json() # <-- NEW
self.last_poll_times['save_data'] = now
# Sleep for a short duration to prevent busy-waiting
time.sleep(1)
except Exception as e:
logging.critical(f"Unhandled exception in main loop: {e}", exc_info=True)
time.sleep(10)
def setup_logging(log_level_str: str, process_name: str):
"""Configures logging for the script."""
if not os.path.exists(LOGS_DIR):
try:
os.makedirs(LOGS_DIR)
except OSError as e:
print(f"Failed to create logs directory {LOGS_DIR}: {e}")
return
level_map = {
'debug': logging.DEBUG,
'normal': logging.INFO,
'off': logging.NOTSET
}
log_level = level_map.get(log_level_str.lower(), logging.INFO)
if log_level == logging.NOTSET:
return
handlers_list = [logging.FileHandler(LOG_FILE, mode='a')]
if sys.stdout.isatty():
handlers_list.append(logging.StreamHandler(sys.stdout))
logging.basicConfig(
level=log_level,
format=f"%(asctime)s.%(msecs)03d | {process_name:<20} | %(levelname)-8s | %(message)s",
datefmt='%Y-%m-%d %H:%M:%S',
handlers=handlers_list
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Hyperliquid Whale Tracker")
parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
args = parser.parse_args()
setup_logging(args.log_level, "WhaleTracker")
# Load wallets to track
wallets_to_track = []
try:
with open(INPUT_FILE, 'r') as f:
wallets_to_track = json.load(f)
if not isinstance(wallets_to_track, list) or not wallets_to_track:
raise ValueError(f"'{INPUT_FILE}' is empty or not a list.")
except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
logging.critical(f"Failed to load '{INPUT_FILE}': {e}. Exiting.")
sys.exit(1)
# Initialize API client
api_client = HyperliquidAPI(base_url=API_ENDPOINT)
# Initialize and run the tracker
runner = WhaleTrackerRunner(api_client, wallets_to_track, shared_whale_data_dict=None)
try:
runner.run()
except KeyboardInterrupt:
logging.info("Whale Tracker shutting down.")
sys.exit(0)