trade_executor, agent creator

2025-10-16 13:18:39 +02:00
parent 0d53200882
commit 25df8b8ba9
15 changed files with 541 additions and 133 deletions

Binary file not shown.


@@ -36,12 +36,12 @@
"market_cap": 10637373991.458858
},
"TOTAL_market_cap_daily": {
"datetime_utc": "2025-10-15 00:00:00",
"market_cap": 3950478733651.1655
"datetime_utc": "2025-10-16 00:00:00",
"market_cap": 3849619103702.8604
},
"PUMP_market_cap": {
"datetime_utc": "2025-10-14 21:02:30",
"market_cap": 1454398647.593871
},
"summary_last_updated_utc": "2025-10-15T00:16:07.128221+00:00"
"summary_last_updated_utc": "2025-10-16T00:16:09.640449+00:00"
}


@@ -1,58 +1,43 @@
{
"sma_125d_eth": {
"enabled": true,
"script": "strategy_template.py",
"parameters": {
"coin": "ETH",
"timeframe": "1D",
"sma_period": 125
}
},
"sma_cross_1": {
"enabled": true,
"script": "strategy_sma_cross.py",
"parameters": {
"coin": "ETH",
"timeframe": "5m",
"sma_period": 5,
"rma_period": 10,
"ema_period": 15
"slow": 44,
"fast": 7,
"size": 0.0055
}
},
"sma_cross_2": {
"enabled": true,
"enabled": false,
"script": "strategy_sma_cross.py",
"parameters": {
"coin": "BTC",
"timeframe": "5m",
"sma_period": 5
"sma_period": 5,
"size": 0.0001
}
},
"sma_125d_btc": {
"enabled": true,
"enabled": false,
"script": "strategy_template.py",
"parameters": {
"coin": "BTC",
"timeframe": "1D",
"sma_period": 125
"sma_period": 125,
"size": 0.0001
}
},
"sma_44d_btc": {
"enabled": true,
"enabled": false,
"script": "strategy_template.py",
"parameters": {
"coin": "BTC",
"timeframe": "1D",
"sma_period": 44
}
},
"sma_5m_eth": {
"enabled": true,
"script": "strategy_template.py",
"parameters": {
"coin": "ETH",
"timeframe": "5m",
"sma_period": 5
"sma_period": 44,
"size": 0.0001
}
}
}
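Note: trade_executor.py (added in this commit, below) skips any BUY signal whose strategy defines no "size" parameter, so each enabled entry here needs one. A minimal validation sketch — a hypothetical helper, not part of the commit:

import json

# _data/strategies.json is the path trade_executor.py reads
with open("_data/strategies.json", "r", encoding="utf-8") as f:
    strategies = json.load(f)

for name, cfg in strategies.items():
    if cfg.get("enabled", False) and "size" not in cfg.get("parameters", {}):
        # trade_executor.py logs an error and skips the trade in this case
        print(f"WARNING: enabled strategy '{name}' has no 'size' parameter")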


@@ -3,5 +3,5 @@
"current_signal": "SELL",
"last_signal_change_utc": "2025-10-14T00:00:00+00:00",
"signal_price": 113026.0,
"last_checked_utc": "2025-10-15T16:31:15.415923+00:00"
"last_checked_utc": "2025-10-16T10:42:03.203292+00:00"
}


@@ -3,5 +3,5 @@
"current_signal": "BUY",
"last_signal_change_utc": "2025-08-26T00:00:00+00:00",
"signal_price": 4600.63,
"last_checked_utc": "2025-10-15T16:31:15.411175+00:00"
"last_checked_utc": "2025-10-15T17:35:17.663159+00:00"
}


@@ -3,5 +3,5 @@
"current_signal": "SELL",
"last_signal_change_utc": "2025-10-14T00:00:00+00:00",
"signal_price": 113026.0,
"last_checked_utc": "2025-10-15T16:31:15.422945+00:00"
"last_checked_utc": "2025-10-16T10:42:03.202977+00:00"
}


@@ -1,7 +1,7 @@
{
"strategy_name": "sma_5m_eth",
"current_signal": "BUY",
"last_signal_change_utc": "2025-10-15T16:00:00+00:00",
"signal_price": 3976.4,
"last_checked_utc": "2025-10-15T16:30:15.367655+00:00"
"current_signal": "SELL",
"last_signal_change_utc": "2025-10-15T17:30:00+00:00",
"signal_price": 3937.5,
"last_checked_utc": "2025-10-15T17:35:05.035566+00:00"
}


@@ -1,7 +1,7 @@
{
"strategy_name": "sma_cross_1",
"current_signal": "BUY",
"last_signal_change_utc": "2025-10-15T16:00:00+00:00",
"signal_price": 3976.4,
"last_checked_utc": "2025-10-15T16:30:15.368224+00:00"
"last_signal_change_utc": "2025-10-16T09:40:00+00:00",
"signal_price": 4013.6,
"last_checked_utc": "2025-10-16T11:15:05.033673+00:00"
}


@@ -1,7 +1,7 @@
{
"strategy_name": "sma_cross_2",
"current_signal": "BUY",
"last_signal_change_utc": "2025-10-15T16:25:00+00:00",
"signal_price": 111016.0,
"last_checked_utc": "2025-10-15T16:30:15.380563+00:00"
"current_signal": "SELL",
"last_signal_change_utc": "2025-10-16T10:30:00+00:00",
"signal_price": 111342.0,
"last_checked_utc": "2025-10-16T10:40:05.037771+00:00"
}

agents (11 changed lines)

@@ -1,3 +1,8 @@
agent 001
wallet: 0x7773833262f020c7979ec8aae38455c17ba4040c
Private Key: 0x659326d719a4322244d6e7f28e7fa2780f034e9f6a342ef1919664817e6248df
==================================================
SAVE THESE SECURELY. This is what your bot will use.
Name: trade_executor
(Agent has a default long-term validity)
🔑 Agent Private Key: 0xabed7379ec33253694eba50af8a392a88ea32b72b5f4f9cddceb0f5879428b69
🏠 Agent Address: 0xcB262CeAaE5D8A99b713f87a43Dd18E6Be892739
==================================================

create_agent.py (new file, 69 lines)

@@ -0,0 +1,69 @@
import os
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.utils import constants
from dotenv import load_dotenv
from datetime import datetime, timedelta
import json

# Load environment variables from a .env file if it exists
load_dotenv()

def create_and_authorize_agent():
    """
    Creates and authorizes a new agent key pair using your main wallet,
    following the correct SDK pattern.
    """
    # --- STEP 1: Load your main wallet ---
    # This is the wallet that holds the funds and has been activated on Hyperliquid.
    main_wallet_private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY")
    if not main_wallet_private_key:
        main_wallet_private_key = input("Please enter the private key of your MAIN trading wallet: ")

    try:
        main_account = Account.from_key(main_wallet_private_key)
        print(f"\n✅ Loaded main wallet: {main_account.address}")
    except Exception as e:
        print(f"❌ Error: Invalid main wallet private key provided. Details: {e}")
        return

    # --- STEP 2: Initialize the Exchange with your MAIN account ---
    # This object is used to send the authorization transaction.
    exchange = Exchange(main_account, constants.MAINNET_API_URL, account_address=main_account.address)

    # --- STEP 3: Create and approve the agent with a specific name ---
    agent_name = "trade_executor"
    print(f"\n🔗 Authorizing a new agent named '{agent_name}'...")

    try:
        # --- FIX: Pass only the agent name string to the function ---
        approve_result, agent_private_key = exchange.approve_agent(agent_name)

        if approve_result.get("status") == "ok":
            # Derive the agent's public address from the key we received
            agent_account = Account.from_key(agent_private_key)
            print("\n🎉 SUCCESS! Agent has been authorized on-chain.")
            print("="*50)
            print("SAVE THESE SECURELY. This is what your bot will use.")
            print(f" Name: {agent_name}")
            print(f" (Agent has a default long-term validity)")
            print(f"🔑 Agent Private Key: {agent_private_key}")
            print(f"🏠 Agent Address: {agent_account.address}")
            print("="*50)
            print("\nYou can now set this private key as the AGENT_PRIVATE_KEY environment variable.")
        else:
            print("\n❌ ERROR: Agent authorization failed.")
            print(" Response:", approve_result)
            if "Vault may not perform this action" in str(approve_result):
                print("\n ACTION REQUIRED: This error means your main wallet (vault) has not been activated. "
                      "Please go to the Hyperliquid website, connect this wallet, and make a deposit to activate it.")
    except Exception as e:
        print(f"\nAn unexpected error occurred during authorization: {e}")

if __name__ == "__main__":
    create_and_authorize_agent()
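
Once authorized, the bot signs orders with the agent key while funds and positions stay in the main wallet. A minimal initialization sketch, mirroring the Exchange setup used by trade_executor.py below (environment variable names as introduced in this commit):

import os
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.utils import constants

agent = Account.from_key(os.environ["AGENT_PRIVATE_KEY"])
# account_address points at the main wallet that actually holds the funds
exchange = Exchange(agent, constants.MAINNET_API_URL,
                    account_address=os.environ["MAIN_WALLET_ADDRESS"])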


@@ -25,6 +25,7 @@ DB_PATH = os.path.join("_data", "market_data.db")
STATUS_FILE = os.path.join("_data", "fetcher_status.json")
MARKET_CAP_SUMMARY_FILE = os.path.join("_data", "market_cap_data.json")
LOGS_DIR = "_logs"
TRADE_EXECUTOR_STATUS_FILE = os.path.join(LOGS_DIR, "trade_executor_status.json")
def format_market_cap(mc_value):
@@ -81,11 +82,11 @@ def data_fetcher_scheduler():
time.sleep(1)
def run_resampler_job():
def run_resampler_job(timeframes_to_generate: list):
"""Defines the job for the resampler, redirecting output to a log file."""
log_file = os.path.join(LOGS_DIR, "resampler.log")
try:
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "off"]
with open(log_file, 'a') as f:
f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n")
subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
@@ -95,11 +96,11 @@ def run_resampler_job():
f.write(f"Failed to run resampler.py job: {e}\n")
def resampler_scheduler():
def resampler_scheduler(timeframes_to_generate: list):
"""Schedules the resampler.py script."""
setup_logging('off', 'ResamplerScheduler')
run_resampler_job()
schedule.every(4).minutes.do(run_resampler_job)
run_resampler_job(timeframes_to_generate)
schedule.every(4).minutes.do(run_resampler_job, timeframes_to_generate)
while True:
schedule.run_pending()
time.sleep(1)
@@ -152,6 +153,7 @@ class MainApp:
self.prices = {}
self.market_caps = {}
self.last_db_update_info = "Initializing..."
self.open_positions = {}
self.background_processes = processes
self.process_status = {}
self.strategy_configs = strategy_configs
@@ -182,16 +184,30 @@
def read_strategy_statuses(self):
"""Reads the status JSON file for each enabled strategy."""
for name in self.strategy_configs.keys():
status_file = os.path.join("_data", f"strategy_status_{name}.json")
if os.path.exists(status_file):
try:
with open(status_file, 'r', encoding='utf-8') as f:
self.strategy_statuses[name] = json.load(f)
except (IOError, json.JSONDecodeError):
self.strategy_statuses[name] = {"error": "Could not read status file."}
else:
self.strategy_statuses[name] = {"current_signal": "Initializing..."}
enabled_statuses = {}
for name, config in self.strategy_configs.items():
if config.get("enabled", False):
status_file = os.path.join("_data", f"strategy_status_{name}.json")
if os.path.exists(status_file):
try:
with open(status_file, 'r', encoding='utf-8') as f:
enabled_statuses[name] = json.load(f)
except (IOError, json.JSONDecodeError):
enabled_statuses[name] = {"error": "Could not read status file."}
else:
enabled_statuses[name] = {"current_signal": "Initializing..."}
self.strategy_statuses = enabled_statuses
def read_executor_status(self):
"""Reads the live status file from the trade executor."""
if os.path.exists(TRADE_EXECUTOR_STATUS_FILE):
try:
with open(TRADE_EXECUTOR_STATUS_FILE, 'r', encoding='utf-8') as f:
self.open_positions = json.load(f)
except (IOError, json.JSONDecodeError):
logging.debug("Could not read trade executor status file.")
else:
self.open_positions = {}
def get_overall_db_status(self):
@@ -227,12 +243,11 @@
"""Displays a formatted dashboard with side-by-side tables."""
print("\x1b[H\x1b[J", end="") # Clear screen
# --- Build Left Table (Market Dashboard) ---
left_table_lines = []
left_table_width = 44
left_table_lines.append("--- Market Dashboard ---\t\t")
left_table_lines.append("--- Market Dashboard ---")
left_table_lines.append("-" * left_table_width)
left_table_lines.append(f"{'#':^2} | {'Coin':^6} | {'Live Price':>10} | {'Market Cap':>15} |")
left_table_lines.append(f"{'#':<2} | {'Coin':^6} | {'Live Price':>10} | {'Market Cap':>15} |")
left_table_lines.append("-" * left_table_width)
for i, coin in enumerate(self.watched_coins, 1):
price = self.prices.get(coin, "Loading...")
@@ -241,12 +256,11 @@
left_table_lines.append(f"{i:<2} | {coin:^6} | {price:>10} | {formatted_mc:>15} |")
left_table_lines.append("-" * left_table_width)
# --- Build Right Table (Strategy Status) ---
right_table_lines = []
right_table_width = 148
right_table_width = 154
right_table_lines.append("--- Strategy Status ---")
right_table_lines.append("-" * right_table_width)
right_table_lines.append(f"{'#':<2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':<8} | {'Signal Price':>12} | {'Last Change (Local)':>22} | {'TF':^5} | {'Parameters':<45} |")
right_table_lines.append(f"{'#':^2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':^8} | {'Signal Price':>12} | {'Last Change':>17} | {'TF':^5} | {'Size':^8} | {'Parameters':<45} |")
right_table_lines.append("-" * right_table_width)
for i, (name, status) in enumerate(self.strategy_statuses.items(), 1):
signal = status.get('current_signal', 'N/A')
@@ -255,7 +269,6 @@
last_change = status.get('last_signal_change_utc')
last_change_display = 'Never'
if last_change:
# Convert UTC timestamp from file to local time for display
dt_utc = datetime.fromisoformat(last_change.replace('Z', '+00:00')).replace(tzinfo=timezone.utc)
dt_local = dt_utc.astimezone(None)
last_change_display = dt_local.strftime('%Y-%m-%d %H:%M')
@@ -263,14 +276,14 @@
config_params = self.strategy_configs.get(name, {}).get('parameters', {})
coin = config_params.get('coin', 'N/A')
timeframe = config_params.get('timeframe', 'N/A')
size = config_params.get('size', 'N/A')
other_params = {k: v for k, v in config_params.items() if k not in ['coin', 'timeframe']}
other_params = {k: v for k, v in config_params.items() if k not in ['coin', 'timeframe', 'size']}
params_str = ", ".join([f"{k}={v}" for k, v in other_params.items()])
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:<8} | {price_display:>12} | {last_change_display:>22} | {timeframe:^5} | {params_str:<45} |")
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size:>8} | {params_str:<45} |")
right_table_lines.append("-" * right_table_width)
# --- Combine Tables Side-by-Side ---
output_lines = []
max_rows = max(len(left_table_lines), len(right_table_lines))
separator = " "
@@ -280,8 +293,43 @@
right_part = indent + right_table_lines[i] if i < len(right_table_lines) else ""
output_lines.append(f"{left_part}{separator}{right_part}")
# --- Add Bottom Sections ---
output_lines.append(f"\nDB Status: Last update -> {self.last_db_update_info}")
output_lines.append("\n--- Open Positions ---")
pos_table_width = 100
output_lines.append("-" * pos_table_width)
output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
output_lines.append("-" * pos_table_width)
perps_positions = self.open_positions.get('perpetuals_account', {}).get('open_positions', [])
spot_positions = self.open_positions.get('spot_account', {}).get('positions', [])
if not perps_positions and not spot_positions:
output_lines.append("No open positions found.")
else:
for pos in perps_positions:
# --- FIX: Safely handle potentially None values before formatting ---
try:
pnl = float(pos.get('pnl', 0.0))
pnl_str = f"${pnl:,.2f}"
except (ValueError, TypeError):
pnl_str = "Error"
coin = pos.get('coin') or '-'
size = pos.get('size') or '-'
entry_price = pos.get('entry_price') or '-'
mark_price = pos.get('mark_price') or '-'
leverage = pos.get('leverage') or '-'
output_lines.append(f"{'Perps':<10} | {coin:<6} | {size:>15} | {entry_price:>12} | {mark_price:>12} | {pnl_str:>15} | {leverage:>10} |")
for pos in spot_positions:
pnl = pos.get('pnl', 'N/A')
coin = pos.get('coin') or '-'
balance_size = pos.get('balance_size') or '-'
output_lines.append(f"{'Spot':<10} | {coin:<6} | {balance_size:>15} | {'-':>12} | {'-':>12} | {pnl:>15} | {'-':>10} |")
output_lines.append("-" * pos_table_width)
output_lines.append("\n--- Background Processes ---")
for name, status in self.process_status.items():
output_lines.append(f"{name:<25}: {status}")
@@ -297,6 +345,7 @@
self.read_market_caps()
self.get_overall_db_status()
self.read_strategy_statuses()
self.read_executor_status()
self.check_process_status()
self.display_dashboard()
time.sleep(2)
@@ -318,25 +367,37 @@ if __name__ == "__main__":
processes = {}
strategy_configs = {}
processes["Market Feeder"] = multiprocessing.Process(target=run_market_feeder, daemon=True)
processes["Data Fetcher"] = multiprocessing.Process(target=data_fetcher_scheduler, daemon=True)
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, daemon=True)
processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)
try:
with open(STRATEGY_CONFIG_FILE, 'r') as f:
strategy_configs = json.load(f)
for name, config in strategy_configs.items():
if config.get("enabled", False):
if not os.path.exists(config['script']):
logging.error(f"Strategy script '{config['script']}' for strategy '{name}' not found. Skipping.")
continue
proc = multiprocessing.Process(target=run_strategy, args=(name, config), daemon=True)
processes[f"Strategy: {name}"] = proc
except (FileNotFoundError, json.JSONDecodeError) as e:
logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
sys.exit(1)
required_timeframes = set()
for name, config in strategy_configs.items():
if config.get("enabled", False):
tf = config.get("parameters", {}).get("timeframe")
if tf:
required_timeframes.add(tf)
if not required_timeframes:
logging.warning("No timeframes required by any enabled strategy. Resampler will not run effectively.")
processes["Market Feeder"] = multiprocessing.Process(target=run_market_feeder, daemon=True)
processes["Data Fetcher"] = multiprocessing.Process(target=data_fetcher_scheduler, daemon=True)
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(list(required_timeframes),), daemon=True)
processes["Market Cap Fetcher"] = multiprocessing.Process(target=market_cap_fetcher_scheduler, daemon=True)
for name, config in strategy_configs.items():
if config.get("enabled", False):
if not os.path.exists(config['script']):
logging.error(f"Strategy script '{config['script']}' for strategy '{name}' not found. Skipping.")
continue
proc = multiprocessing.Process(target=run_strategy, args=(name, config), daemon=True)
processes[f"Strategy: {name}"] = proc
# Launch all processes
for name, proc in processes.items():
logging.info(f"Starting process '{name}'...")
proc.start()
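
The new "Open Positions" table is fed by _logs/trade_executor_status.json, which trade_executor.py (added below) rewrites on every cycle. An abbreviated example of the shape the dashboard expects — values are illustrative, not taken from this commit:

{
    "last_updated_utc": "2025-10-16T10:40:05",
    "perpetuals_account": {
        "balances": {"account_value": "...", "total_margin_used": "...", "withdrawable": "..."},
        "open_positions": [
            {"coin": "ETH", "size": "0.0055", "entry_price": "4013.6",
             "mark_price": "4020.1", "pnl": "0.04", "leverage": "1.0x"}
        ]
    },
    "spot_account": {
        "positions": [{"coin": "HYPE", "balance_size": 1.0, "position_value": 40.0, "pnl": "N/A"}]
    }
}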


@@ -5,7 +5,8 @@ import sys
import sqlite3
import pandas as pd
import json
from datetime import datetime, timezone
from datetime import datetime, timezone, timedelta
import time
# Assuming logging_utils.py is in the same directory
from logging_utils import setup_logging
@@ -14,6 +15,7 @@ class Resampler:
"""
Reads 1-minute candle data directly from the SQLite database, resamples
it to various timeframes, and stores the results back in the database.
This script is designed to run continuously as a self-scheduling service.
"""
def __init__(self, log_level: str, coins: list, timeframes: dict):
@@ -30,27 +32,19 @@
'volume': 'sum',
'number_of_trades': 'sum'
}
self.resampling_status = self._load_existing_status()
self.resampling_status = {}
def _load_existing_status(self) -> dict:
"""Loads the existing status file if it exists, otherwise returns an empty dict."""
if os.path.exists(self.status_file_path):
try:
with open(self.status_file_path, 'r', encoding='utf-8') as f:
logging.info(f"Loading existing status from '{self.status_file_path}'")
return json.load(f)
except (IOError, json.JSONDecodeError) as e:
logging.warning(f"Could not read existing status file. Starting fresh. Error: {e}")
return {}
def run(self):
def _execute_resampling_job(self):
"""
Main execution function to process all configured coins and update the database.
"""
if not os.path.exists(self.db_path):
logging.error(f"Database file '{self.db_path}' not found. "
"Please run the data fetcher script first.")
sys.exit(1)
return # Don't exit, just wait for the next cycle
# Load the latest status file at the start of each job
self.resampling_status = self._load_existing_status()
with sqlite3.connect(self.db_path) as conn:
conn.execute("PRAGMA journal_mode=WAL;")
@@ -109,7 +103,40 @@
logging.error(f"Failed to process coin '{coin}': {e}")
self._save_status()
logging.info("--- Resampling process complete ---")
logging.info("--- Resampling job complete ---")
def run_periodically(self):
"""Runs the resampling job at every 5-minute mark of the hour (00, 05, 10...)."""
logging.info("Resampler started. Waiting for the first scheduled run...")
while True:
# 1. Calculate sleep time
now = datetime.now(timezone.utc)
# Calculate how many minutes past the last 5-minute mark we are
minutes_past_mark = now.minute % 5
seconds_past_mark = minutes_past_mark * 60 + now.second + (now.microsecond / 1_000_000)
# The total interval is 5 minutes (300 seconds)
sleep_duration = 300 - seconds_past_mark
# Add a small buffer to ensure the candle data is ready
sleep_duration += 5
logging.info(f"Next resampling run in {sleep_duration:.2f} seconds.")
time.sleep(sleep_duration)
# 2. Execute the job
logging.info("Scheduled time reached. Starting resampling job...")
self._execute_resampling_job()
def _load_existing_status(self) -> dict:
"""Loads the existing status file if it exists, otherwise returns an empty dict."""
if os.path.exists(self.status_file_path):
try:
with open(self.status_file_path, 'r', encoding='utf-8') as f:
return json.load(f)
except (IOError, json.JSONDecodeError) as e:
logging.warning(f"Could not read existing status file. Starting fresh. Error: {e}")
return {}
def _save_status(self):
"""Saves the final resampling status to a JSON file."""
@@ -138,7 +165,6 @@ def parse_timeframes(tf_strings: list) -> dict:
if unit == 'm':
code = f"{numeric_part}min"
elif unit == 'w':
# --- FIX: Use uppercase 'W' for weeks to avoid deprecation warning ---
code = f"{numeric_part}W"
elif unit in ['h', 'd']:
code = f"{numeric_part}{unit}"
@@ -151,39 +177,30 @@
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Resample 1-minute candle data from SQLite to other timeframes.")
parser.add_argument(
"--coins",
nargs='+',
default=["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"],
help="List of coins to process."
)
parser.add_argument(
"--timeframes",
nargs='+',
default=['4m', '5m', '15m', '30m', '37m', '148m', '4h', '12h', '1d', '1w'],
help="List of timeframes to generate (e.g., 5m 1h 1d)."
)
parser.add_argument(
"--timeframe",
dest="timeframes",
nargs='+',
help=argparse.SUPPRESS
)
parser.add_argument(
"--log-level",
default="normal",
choices=['off', 'normal', 'debug'],
help="Set the logging level for the script."
)
args = parser.parse_args()
# The script now runs as a long-running service, loading its config from a file.
CONFIG_FILE = "resampler_conf.json"
try:
with open(CONFIG_FILE, 'r') as f:
config = json.load(f)
coins = config.get("coins", [])
timeframes_list = config.get("timeframes", [])
except (FileNotFoundError, json.JSONDecodeError) as e:
print(f"FATAL: Could not load '{CONFIG_FILE}'. Please ensure it exists and is valid. Error: {e}")
sys.exit(1)
# Use a basic log level until the class is initialized
setup_logging('normal', 'Resampler')
timeframes_dict = parse_timeframes(args.timeframes)
timeframes_dict = parse_timeframes(timeframes_list)
resampler = Resampler(
log_level=args.log_level,
coins=args.coins,
log_level='normal',
coins=coins,
timeframes=timeframes_dict
)
resampler.run()
try:
resampler.run_periodically()
except KeyboardInterrupt:
logging.info("Resampler process stopped.")
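
resampler.py no longer takes CLI arguments: it loads resampler_conf.json at startup and then self-schedules on every 5-minute mark plus a 5-second buffer (at 10:42:03 UTC, for example, seconds_past_mark is 123, so it sleeps 300 - 123 + 5 = 182 seconds and fires at 10:45:05). An illustrative config with the two keys the script reads — the file itself is not shown in this diff, and the values here are examples:

{
    "coins": ["BTC", "ETH", "SOL"],
    "timeframes": ["5m", "1h", "1d"]
}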

trade_executor.py (new file, 216 lines)

@@ -0,0 +1,216 @@
import argparse
import logging
import os
import sys
import json
import time
from datetime import datetime

from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.info import Info
from hyperliquid.utils import constants
from dotenv import load_dotenv

from logging_utils import setup_logging
from trade_log import log_trade

# Load environment variables from a .env file
load_dotenv()

class TradeExecutor:
    """
    Monitors strategy signals, executes trades, logs all trade actions to a
    persistent CSV, and maintains a live JSON status of the account.
    """

    def __init__(self, log_level: str):
        setup_logging(log_level, 'TradeExecutor')

        agent_pk = os.environ.get("AGENT_PRIVATE_KEY")
        if not agent_pk:
            logging.error("AGENT_PRIVATE_KEY environment variable not set. Cannot execute trades.")
            sys.exit(1)

        self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
        if not self.vault_address:
            logging.error("MAIN_WALLET_ADDRESS environment variable not set. Cannot query account state.")
            sys.exit(1)

        self.account = Account.from_key(agent_pk)
        logging.info(f"Trade Executor initialized. Agent: {self.account.address}, Vault: {self.vault_address}")
        self.exchange = Exchange(self.account, constants.MAINNET_API_URL, account_address=self.vault_address)
        self.info = Info(constants.MAINNET_API_URL, skip_ws=True)

        strategy_config_path = os.path.join("_data", "strategies.json")
        try:
            with open(strategy_config_path, 'r') as f:
                self.strategy_configs = {name: config for name, config in json.load(f).items() if config.get("enabled")}
            logging.info(f"Loaded {len(self.strategy_configs)} enabled strategies.")
        except (FileNotFoundError, json.JSONDecodeError) as e:
            logging.error(f"Could not load strategies from '{strategy_config_path}': {e}")
            sys.exit(1)

        self.status_file_path = os.path.join("_logs", "trade_executor_status.json")

    def _save_executor_status(self, perpetuals_state, spot_state, all_market_contexts):
        """Saves the current balances and open positions from both accounts to a live status file."""
        status = {
            "last_updated_utc": datetime.now().isoformat(),
            "perpetuals_account": {
                "balances": {},
                "open_positions": []
            },
            "spot_account": {
                "positions": []
            }
        }

        margin_summary = perpetuals_state.get("marginSummary", {})
        status["perpetuals_account"]["balances"] = {
            "account_value": margin_summary.get("accountValue"),
            "total_margin_used": margin_summary.get("totalMarginUsed"),
            "withdrawable": margin_summary.get("withdrawable")
        }

        asset_positions = perpetuals_state.get("assetPositions", [])
        for asset_pos in asset_positions:
            pos = asset_pos.get('position', {})
            if float(pos.get('szi', 0)) != 0:
                position_value = float(pos.get('positionValue', 0))
                margin_used = float(pos.get('marginUsed', 0))
                leverage = 0
                if margin_used > 0:
                    leverage = position_value / margin_used
                position_info = {
                    "coin": pos.get('coin'),
                    "size": pos.get('szi'),
                    "position_value": pos.get('positionValue'),
                    "entry_price": pos.get('entryPx'),
                    "mark_price": pos.get('markPx'),
                    "pnl": pos.get('unrealizedPnl'),
                    "liq_price": pos.get('liquidationPx'),
                    "margin": pos.get('marginUsed'),
                    "funding": pos.get('fundingRate'),
                    "leverage": f"{leverage:.1f}x"
                }
                status["perpetuals_account"]["open_positions"].append(position_info)

        price_map = {
            asset.get("universe", {}).get("name"): asset.get("markPx")
            for asset in all_market_contexts
            if asset.get("universe", {}).get("name")
        }

        spot_balances = spot_state.get("balances", [])
        for bal in spot_balances:
            total_balance = float(bal.get('total', 0))
            if total_balance > 0:
                coin = bal.get('coin')
                mark_price = float(price_map.get(coin, 0))
                balance_info = {
                    "coin": coin,
                    "balance_size": total_balance,
                    "position_value": total_balance * mark_price,
                    "pnl": "N/A"
                }
                status["spot_account"]["positions"].append(balance_info)

        try:
            with open(self.status_file_path, 'w', encoding='utf-8') as f:
                json.dump(status, f, indent=4)
            logging.debug(f"Successfully updated live executor status at '{self.status_file_path}'")
        except IOError as e:
            logging.error(f"Failed to write live executor status file: {e}")

    def run(self):
        """The main execution loop."""
        logging.info("Starting Trade Executor loop...")
        while True:
            try:
                perpetuals_state = self.info.user_state(self.vault_address)
                spot_state = self.info.spot_user_state(self.vault_address)
                meta, asset_contexts = self.info.meta_and_asset_ctxs()

                open_positions = {}
                for asset_pos in perpetuals_state.get('assetPositions', []):
                    pos_details = asset_pos.get('position', {})
                    if float(pos_details.get('szi', 0)) != 0:
                        open_positions[pos_details.get('coin')] = pos_details

                self._save_executor_status(perpetuals_state, spot_state, asset_contexts)

                for name, config in self.strategy_configs.items():
                    coin = config['parameters'].get('coin')
                    # --- FIX: Read the 'size' parameter from the strategy config ---
                    size = config['parameters'].get('size')
                    status_file = os.path.join("_data", f"strategy_status_{name}.json")
                    if not os.path.exists(status_file):
                        continue
                    with open(status_file, 'r') as f:
                        status = json.load(f)

                    signal = status.get('current_signal')
                    has_position = coin in open_positions

                    if signal == "BUY":
                        if not has_position:
                            if not size:
                                logging.error(f"[{name}] 'size' parameter not defined in strategies.json. Skipping trade.")
                                continue
                            # --- Using the 'size' from config for all BUY signals ---
                            logging.warning(f"[{name}] SIGNAL: BUY for {coin}. ACTION: Opening new long position of size {size}.")
                            # Placeholder for live trading logic
                            # self.exchange.market_open(coin, True, size, None, 0.01)
                            price = status.get('signal_price', 0)
                            log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=price, size=size, signal=signal)
                    elif signal == "SELL":
                        if has_position:
                            position_details = open_positions[coin]
                            position_size = float(position_details.get('szi', 0))
                            # Only close if it's a long position. Short logic would go here.
                            if position_size > 0:
                                logging.warning(f"[{name}] SIGNAL: SELL for {coin}. ACTION: Closing existing long position.")
                                # Placeholder for live trading logic
                                # self.exchange.market_close(coin)
                                price = float(position_details.get('markPx', 0))
                                pnl = float(position_details.get('unrealizedPnl', 0))
                                log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=price, size=position_size, signal=signal, pnl=pnl)
                    else:
                        logging.info(f"[{name}] SIGNAL: {signal} for {coin}. ACTION: No trade needed (Position: {'Open' if has_position else 'None'}).")
            except Exception as e:
                logging.error(f"An error occurred in the main executor loop: {e}")

            time.sleep(15)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run the Trade Executor.")
    parser.add_argument(
        "--log-level",
        default="normal",
        choices=['off', 'normal', 'debug'],
        help="Set the logging level for the script."
    )
    args = parser.parse_args()

    executor = TradeExecutor(log_level=args.log_level)
    try:
        executor.run()
    except KeyboardInterrupt:
        logging.info("Trade Executor stopped.")
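
Running the executor requires both of the keys produced above, read from the environment (a .env file works, since the script calls load_dotenv); key values elided:

# .env
AGENT_PRIVATE_KEY=0x...
MAIN_WALLET_ADDRESS=0x...

python trade_executor.py --log-level debug

Note that the actual order calls (exchange.market_open / exchange.market_close) are still commented out, so this version only logs the trades it would have made.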

trade_log.py (new file, 55 lines)

@@ -0,0 +1,55 @@
import os
import csv
from datetime import datetime, timezone
import threading

# A lock to prevent race conditions when multiple strategies might log at once in the future
log_lock = threading.Lock()

def log_trade(strategy: str, coin: str, action: str, price: float, size: float, signal: str, pnl: float = 0.0):
    """
    Appends a record of a trade action to a persistent CSV log file.

    Args:
        strategy (str): The name of the strategy that triggered the action.
        coin (str): The coin being traded (e.g., 'BTC').
        action (str): The action taken (e.g., 'OPEN_LONG', 'CLOSE_LONG').
        price (float): The execution price of the trade.
        size (float): The size of the trade.
        signal (str): The signal that triggered the trade (e.g., 'BUY', 'SELL').
        pnl (float, optional): The realized profit and loss for closing trades. Defaults to 0.0.
    """
    log_dir = "_logs"
    file_path = os.path.join(log_dir, "trade_history.csv")

    # Ensure the logs directory exists
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Define the headers for the CSV file
    headers = ["timestamp_utc", "strategy", "coin", "action", "price", "size", "signal", "pnl"]

    # Check if the file needs a header
    file_exists = os.path.isfile(file_path)

    with log_lock:
        try:
            with open(file_path, 'a', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=headers)
                if not file_exists:
                    writer.writeheader()
                writer.writerow({
                    "timestamp_utc": datetime.now(timezone.utc).isoformat(),
                    "strategy": strategy,
                    "coin": coin,
                    "action": action,
                    "price": price,
                    "size": size,
                    "signal": signal,
                    "pnl": pnl
                })
        except IOError as e:
            # If logging fails, print an error to the main console as a fallback.
            print(f"CRITICAL: Failed to write to trade log file: {e}")