🎯 Initial commit: Uniswap Auto CLP trading system

Core Components:
- uniswap_manager.py: V3 concentrated liquidity position manager
- clp_hedger.py: Hyperliquid perpetuals hedging bot
- requirements.txt: Python dependencies
- .gitignore: Security exclusions for sensitive data
- doc/: Project documentation
- tools/: Utility scripts and Git agent

Features:
- Automated liquidity provision on Uniswap V3 (WETH/USDC)
- Delta-neutral hedging using Hyperliquid perpetuals
- Position lifecycle management (open/close/rebalance)
- Automated backup and version control system

Security:
- Private keys and tokens excluded from version control
- Environment variables properly handled
- Automated security validation for backups

Git Agent:
- Hourly automated backups to separate branches
- Keep last 100 backups (~4 days coverage)
- Detailed change tracking and parameter monitoring
- Push to Gitea server automatically
- Manual main branch control preserved
- No performance tracking for privacy
- No notifications for simplicity

Files Added:
- git_agent.py: Main automation script
- agent_config.json: Configuration with Gitea settings
- git_utils.py: Git operations wrapper
- backup_manager.py: Backup branch management
- change_detector.py: File change analysis
- cleanup_manager.py: 100-backup rotation
- commit_formatter.py: Detailed commit messages
- README_GIT_AGENT.md: Complete usage documentation
commit 5ca16ec33f
Date: 2025-12-19 20:30:48 +01:00
18 changed files with 4207 additions and 0 deletions

.gitignore (new file)

@@ -0,0 +1,30 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# Virtual Environment
.venv/
venv/
ENV/
# Environment variables
.env
# Logs
logs/
*.log
# Project State
hedge_status.json
# IDEs
.vscode/
.idea/
.project
.pydevproject
.settings/
# Temporary files
*.tmp
*.bak

clp_hedger.py (new file)

@@ -0,0 +1,709 @@
import os
import time
import logging
import sys
import json
import math
from decimal import Decimal, getcontext, ROUND_DOWN
from typing import Optional, Dict, Any, List, Union
from dotenv import load_dotenv
# --- FIX: Add project root to sys.path to import local modules ---
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.append(project_root)
# Import local modules
try:
from logging_utils import setup_logging
except ImportError:
logging.basicConfig(level=logging.INFO)
setup_logging = None
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.info import Info
from hyperliquid.utils import constants
# Load environment variables
dotenv_path = os.path.join(current_dir, '.env')
load_dotenv(dotenv_path if os.path.exists(dotenv_path) else None)
# --- LOGGING SETUP ---
# Ensure logs directory exists
log_dir = os.path.join(current_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
# Custom Filter for Millisecond Unix Timestamp (Matching Manager style)
class UnixMsLogFilter(logging.Filter):
def filter(self, record):
record.unix_ms = int(record.created * 1000)
return True
# Configure Logging
logger = logging.getLogger("SCALPER_HEDGER")
logger.setLevel(logging.INFO)
logger.handlers.clear() # Clear existing handlers to prevent duplicates
# Console Handler
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setLevel(logging.INFO)
console_fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(console_fmt)
logger.addHandler(console_handler)
# File Handler
log_file = os.path.join(log_dir, 'clp_hedger.log')
file_handler = logging.FileHandler(log_file, encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.addFilter(UnixMsLogFilter())
file_fmt = logging.Formatter('%(unix_ms)d, %(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(file_fmt)
logger.addHandler(file_handler)
# --- DECIMAL PRECISION CONFIGURATION ---
getcontext().prec = 50
# --- CONFIGURATION ---
COIN_SYMBOL = "ETH"
CHECK_INTERVAL = 1
LEVERAGE = 5
STATUS_FILE = "hedge_status.json"
# Strategy Zones
ZONE_BOTTOM_HEDGE_LIMIT = Decimal("1.0")
ZONE_CLOSE_START = Decimal("10.0")
ZONE_CLOSE_END = Decimal("11.0")
ZONE_TOP_HEDGE_START = Decimal("10.0")
# Order Settings
PRICE_BUFFER_PCT = Decimal("0.0015") # 0.15%
MIN_THRESHOLD_ETH = Decimal("0.012")
MIN_ORDER_VALUE_USD = Decimal("10.0")
# Capital Safety
DYNAMIC_THRESHOLD_MULTIPLIER = Decimal("1.3")
MIN_TIME_BETWEEN_TRADES = 25
MAX_HEDGE_MULTIPLIER = Decimal("1.25")
# Edge Protection
EDGE_PROXIMITY_PCT = Decimal("0.04")
VELOCITY_THRESHOLD_PCT = Decimal("0.0005")
POSITION_OPEN_EDGE_PROXIMITY_PCT = Decimal("0.06")
POSITION_CLOSED_EDGE_PROXIMITY_PCT = Decimal("0.025")
LARGE_HEDGE_MULTIPLIER = Decimal("2.8")
# --- HELPER FUNCTIONS ---
def to_decimal(value: Any) -> Decimal:
"""Safely convert value to Decimal."""
if value is None:
return Decimal("0")
return Decimal(str(value))
def round_to_sz_decimals_precise(amount: Decimal, sz_decimals: int) -> float:
"""Round Decimal amount to specific decimals and return float for SDK."""
if amount == 0:
return 0.0
quantizer = Decimal("1").scaleb(-sz_decimals)
rounded = amount.quantize(quantizer, rounding=ROUND_DOWN)
return float(rounded)
def round_to_sig_figs_precise(x: Decimal, sig_figs: int = 5) -> float:
"""Round Decimal to significant figures and return float for SDK."""
if x == 0:
return 0.0
# Use string formatting for sig figs as it's robust
return float(f"{x:.{sig_figs}g}")
def validate_trade_size(size: Decimal, sz_decimals: int, min_order_value: Decimal, price: Decimal) -> float:
"""Validate trade size against minimums."""
if size <= 0:
return 0.0
# Check minimum order value
order_value = size * price
if order_value < min_order_value:
return 0.0
# Check dust
min_size = Decimal("10") ** (-sz_decimals)
if size < min_size:
return 0.0
return round_to_sz_decimals_precise(size, sz_decimals)
# --- STATE MANAGEMENT ---
def get_active_automatic_position() -> Optional[Dict]:
if not os.path.exists(STATUS_FILE):
return None
try:
with open(STATUS_FILE, 'r') as f:
data = json.load(f)
# Expecting a list of positions
if isinstance(data, list):
for entry in data:
if entry.get('type') == 'AUTOMATIC' and entry.get('status') in ['OPEN', 'PENDING_HEDGE', 'CLOSING']:
return entry
# Fallback if single dict (legacy)
elif isinstance(data, dict):
if data.get('type') == 'AUTOMATIC' and data.get('status') in ['OPEN', 'PENDING_HEDGE', 'CLOSING']:
return data
except Exception as e:
logger.error(f"ERROR reading status file: {e}")
return None
def update_position_zones_in_json(token_id: int, zones_data: Dict):
if not os.path.exists(STATUS_FILE): return
try:
with open(STATUS_FILE, 'r') as f:
data = json.load(f)
# Ensure list
if isinstance(data, dict): data = [data]
updated = False
for entry in data:
if entry.get('token_id') == token_id:
entry.update(zones_data)
updated = True
break
if updated:
with open(STATUS_FILE, 'w') as f:
json.dump(data, f, indent=2)
logger.info(f"Updated JSON zones for Position {token_id}")
except Exception as e:
logger.error(f"Error updating JSON zones: {e}")
def update_position_stats(token_id: int, stats_data: Dict):
if not os.path.exists(STATUS_FILE): return
try:
with open(STATUS_FILE, 'r') as f:
data = json.load(f)
if isinstance(data, dict): data = [data]
updated = False
for entry in data:
if entry.get('token_id') == token_id:
entry.update(stats_data)
updated = True
break
if updated:
with open(STATUS_FILE, 'w') as f:
json.dump(data, f, indent=2)
except Exception as e:
logger.error(f"Error updating JSON stats: {e}")
# --- STRATEGY CLASS ---
class HyperliquidStrategy:
def __init__(self, entry_amount0: Decimal, entry_amount1: Decimal, target_value: Decimal,
entry_price: Decimal, low_range: Decimal, high_range: Decimal, start_price: Decimal):
self.entry_amount0 = entry_amount0
self.entry_amount1 = entry_amount1
self.target_value = target_value
self.entry_price = entry_price
self.low_range = low_range
self.high_range = high_range
self.start_price = start_price
self.gap = max(Decimal("0.0"), entry_price - start_price)
self.recovery_target = entry_price + (Decimal("2") * self.gap)
self.L = Decimal("0.0")
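        # Uniswap V3 liquidity math for a position whose price P lies in [Pa, Pb]:
        #   amount0 = L * (1/sqrt(P) - 1/sqrt(Pb))  =>  L = amount0 / (1/sqrt(P) - 1/sqrt(Pb))
        #   amount1 = L * (sqrt(P) - sqrt(Pa))      =>  L = amount1 / (sqrt(P) - sqrt(Pa))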
try:
sqrt_P = entry_price.sqrt()
sqrt_Pa = low_range.sqrt()
sqrt_Pb = high_range.sqrt()
# Method 1: Amount0 (WETH)
if entry_amount0 > 0:
# Assuming amount0 is already in standard units (ETH) from JSON
denom0 = (Decimal("1") / sqrt_P) - (Decimal("1") / sqrt_Pb)
if denom0 > Decimal("1e-10"):
self.L = entry_amount0 / denom0
logger.info(f"Calculated L from Amount0: {self.L:.4f}")
# Method 2: Amount1 (USDC)
if self.L == 0 and entry_amount1 > 0:
denom1 = sqrt_P - sqrt_Pa
if denom1 > Decimal("1e-10"):
self.L = entry_amount1 / denom1
logger.info(f"Calculated L from Amount1: {self.L:.4f}")
# Method 3: Target Value Heuristic
if self.L == 0:
logger.warning("Amounts missing. Using Target Value Heuristic.")
max_eth = target_value / low_range
denom_h = (Decimal("1") / sqrt_Pa) - (Decimal("1") / sqrt_Pb)
if denom_h > 0:
self.L = max_eth / denom_h
logger.info(f"Calculated L from Target Value: {self.L:.4f}")
else:
logger.error("Critical: Invalid Range for L calculation")
except Exception as e:
logger.error(f"Error calculating liquidity: {e}")
sys.exit(1)
def get_pool_delta(self, current_price: Decimal) -> Decimal:
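        # Pool delta = ETH (token0) currently held by the LP position, i.e. the size the hedge shorts:
        #   above range -> 0 (position is entirely USDC)
        #   below range -> L * (1/sqrt(Pa) - 1/sqrt(Pb))  (position is entirely ETH)
        #   in range    -> L * (1/sqrt(P)  - 1/sqrt(Pb))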
if current_price >= self.high_range:
return Decimal("0.0")
if current_price <= self.low_range:
sqrt_Pa = self.low_range.sqrt()
sqrt_Pb = self.high_range.sqrt()
return self.L * ((Decimal("1")/sqrt_Pa) - (Decimal("1")/sqrt_Pb))
sqrt_P = current_price.sqrt()
sqrt_Pb = self.high_range.sqrt()
return self.L * ((Decimal("1")/sqrt_P) - (Decimal("1")/sqrt_Pb))
def calculate_rebalance(self, current_price: Decimal, current_short_size: Decimal) -> Dict:
pool_delta = self.get_pool_delta(current_price)
# Over-Hedge Logic
overhedge_pct = Decimal("0.0")
range_width = self.high_range - self.low_range
if range_width > 0:
price_pct = (current_price - self.low_range) / range_width
# If below 80% of range
if price_pct < Decimal("0.8"):
# Formula: 0.75% boost for every 0.1 drop below 0.8
diff_factor = (Decimal("0.8") - max(Decimal("0.0"), price_pct)) / Decimal("0.1")
overhedge_pct = diff_factor * Decimal("0.0075")
raw_target_short = pool_delta
adjusted_target_short = raw_target_short * (Decimal("1.0") + overhedge_pct)
diff = adjusted_target_short - abs(current_short_size)
return {
"current_price": current_price,
"pool_delta": pool_delta,
"target_short": adjusted_target_short,
"current_short": abs(current_short_size),
"diff": diff,
"action": "SELL" if diff > 0 else "BUY",
"overhedge_pct": overhedge_pct
}
# --- MAIN HEDGER CLASS ---
class ScalperHedger:
def __init__(self):
self.private_key = os.environ.get("HEDGER_PRIVATE_KEY")
self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
if not self.private_key:
logger.error("No HEDGER_PRIVATE_KEY found in .env")
sys.exit(1)
self.account = Account.from_key(self.private_key)
self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
self.exchange = Exchange(self.account, constants.MAINNET_API_URL, account_address=self.vault_address)
# Set Leverage
try:
logger.info(f"Setting leverage to {LEVERAGE}x (Cross)...")
self.exchange.update_leverage(LEVERAGE, COIN_SYMBOL, is_cross=True)
except Exception as e:
logger.error(f"Failed to update leverage: {e}")
self.strategy: Optional[HyperliquidStrategy] = None
self.sz_decimals = self._get_sz_decimals(COIN_SYMBOL)
self.active_position_id = None
# Safety & State
self.last_price: Optional[Decimal] = None
self.last_trade_time = 0
# Velocity Tracking
self.last_price_for_velocity: Optional[Decimal] = None
self.price_history: List[Decimal] = []
self.velocity_history: List[Decimal] = []
# PnL Tracking
self.strategy_start_time = 0
self.last_pnl_check_time = 0
self.trade_history_seen = set()
self.accumulated_pnl = Decimal("0.0")
self.accumulated_fees = Decimal("0.0")
# Order Tracking
self.original_order_side = None
logger.info(f"[DELTA] Delta-Zero Scalper Hedger initialized. Agent: {self.account.address}")
def _init_strategy(self, position_data: Dict):
try:
entry_amount0 = to_decimal(position_data.get('amount0_initial', 0))
entry_amount1 = to_decimal(position_data.get('amount1_initial', 0))
target_value = to_decimal(position_data.get('target_value', 50))
entry_price = to_decimal(position_data['entry_price'])
lower = to_decimal(position_data['range_lower'])
upper = to_decimal(position_data['range_upper'])
start_price = self.get_market_price(COIN_SYMBOL)
if start_price is None:
logger.warning("Waiting for initial price to start strategy...")
return
self.strategy = HyperliquidStrategy(
entry_amount0=entry_amount0,
entry_amount1=entry_amount1,
target_value=target_value,
entry_price=entry_price,
low_range=lower,
high_range=upper,
start_price=start_price
)
# Reset State
self.last_price = start_price
self.last_trade_time = 0
self.price_history = [start_price]
self.strategy_start_time = int(time.time() * 1000)
self.trade_history_seen = set()
self.accumulated_pnl = Decimal("0.0")
self.accumulated_fees = Decimal("0.0")
self.active_position_id = position_data['token_id']
update_position_stats(self.active_position_id, {
"hedge_pnl_realized": 0.0,
"hedge_fees_paid": 0.0
})
logger.info(f"[DELTA] Strat Init: Pos {self.active_position_id} | Range: {lower}-{upper} | Entry: {entry_price} | Start Px: {start_price:.2f}")
except Exception as e:
logger.error(f"Failed to init strategy: {e}")
self.strategy = None
def _get_sz_decimals(self, coin: str) -> int:
try:
meta = self.info.meta()
for asset in meta["universe"]:
if asset["name"] == coin:
return asset["szDecimals"]
return 4
except: return 4
def get_market_price(self, coin: str) -> Optional[Decimal]:
try:
mids = self.info.all_mids()
if coin in mids:
return to_decimal(mids[coin])
except: pass
return None
def get_order_book_levels(self, coin: str) -> Optional[Dict[str, Decimal]]:
try:
snapshot = self.info.l2_snapshot(coin)
if snapshot and 'levels' in snapshot:
bids = snapshot['levels'][0]
asks = snapshot['levels'][1]
if bids and asks:
best_bid = to_decimal(bids[0]['px'])
best_ask = to_decimal(asks[0]['px'])
mid = (best_bid + best_ask) / Decimal("2")
return {'bid': best_bid, 'ask': best_ask, 'mid': mid}
return None
except: return None
def get_current_position(self, coin: str) -> Dict[str, Decimal]:
try:
user_state = self.info.user_state(self.vault_address or self.account.address)
for pos in user_state["assetPositions"]:
if pos["position"]["coin"] == coin:
return {
'size': to_decimal(pos["position"]["szi"]),
'pnl': to_decimal(pos["position"]["unrealizedPnl"])
}
return {'size': Decimal("0"), 'pnl': Decimal("0")}
except: return {'size': Decimal("0"), 'pnl': Decimal("0")}
def get_open_orders(self) -> List[Dict]:
try:
return self.info.open_orders(self.vault_address or self.account.address)
except: return []
def cancel_order(self, coin: str, oid: int):
logger.info(f"Cancelling order {oid}...")
try:
return self.exchange.cancel(coin, oid)
except Exception as e:
logger.error(f"Error cancelling order: {e}")
def place_limit_order(self, coin: str, is_buy: bool, size: Decimal, price: Decimal, order_type: str = "Alo") -> Optional[int]:
# Validate using Decimal logic
validated_size_float = validate_trade_size(size, self.sz_decimals, MIN_ORDER_VALUE_USD, price)
if validated_size_float == 0:
logger.error(f"Trade size {size} invalid after validation")
return None
price_float = round_to_sig_figs_precise(price, 5)
logger.info(f"[ORDER] {order_type.upper()} {coin} {'BUY' if is_buy else 'SELL'} {validated_size_float} @ {price_float}")
try:
order_result = self.exchange.order(coin, is_buy, validated_size_float, price_float, {"limit": {"tif": order_type}}, reduce_only=is_buy)
status = order_result["status"]
if status == "ok":
response_data = order_result["response"]["data"]
if "statuses" in response_data:
status_obj = response_data["statuses"][0]
if "resting" in status_obj:
return status_obj["resting"]["oid"]
elif "filled" in status_obj:
logger.info("Order filled immediately.")
return status_obj["filled"]["oid"]
elif "error" in status_obj:
logger.error(f"Order API Error: {status_obj['error']}")
else:
logger.error(f"Order Failed: {order_result}")
except Exception as e:
logger.error(f"Exception during trade: {e}")
return None
def manage_orders(self) -> bool:
"""Returns True if there is an active order that should prevent new trades."""
open_orders = self.get_open_orders()
my_orders = [o for o in open_orders if o['coin'] == COIN_SYMBOL]
if not my_orders:
return False
if len(my_orders) > 1:
logger.warning("Multiple orders found. Cancelling all.")
for o in my_orders:
self.cancel_order(COIN_SYMBOL, o['oid'])
return False
order = my_orders[0]
oid = order['oid']
order_price = to_decimal(order['limitPx'])
# Check if price moved too far
levels = self.get_order_book_levels(COIN_SYMBOL)
if not levels: return True # Keep order if data missing
current_mid = levels['mid']
pct_diff = abs(current_mid - order_price) / order_price
# Dynamic Buffer logic (Simplified for Decimal)
# Using base buffer for now, can be enhanced
if pct_diff > PRICE_BUFFER_PCT:
logger.info(f"Price moved {pct_diff*100:.3f}% > {PRICE_BUFFER_PCT*100:.3f}%. Cancelling {oid}.")
self.cancel_order(COIN_SYMBOL, oid)
return False
logger.info(f"Order {oid} within range ({pct_diff*100:.3f}%). Waiting.")
return True
def track_fills_and_pnl(self, force: bool = False):
try:
now = time.time()
if not force and now - self.last_pnl_check_time < 10:
return
self.last_pnl_check_time = now
user_fills = self.info.user_fills(self.vault_address or self.account.address)
new_activity = False
for fill in user_fills:
if fill['coin'] != COIN_SYMBOL: continue
if fill['time'] < self.strategy_start_time: continue
fill_id = fill.get('tid')
if fill_id in self.trade_history_seen: continue
self.trade_history_seen.add(fill_id)
fees = to_decimal(fill['fee'])
pnl = to_decimal(fill['closedPnl'])
self.accumulated_fees += fees
self.accumulated_pnl += pnl
new_activity = True
logger.info(f"[FILL] {fill['side']} {fill['sz']} @ {fill['px']} | Fee: {fees} | PnL: {pnl}")
if new_activity:
# Convert back to float for JSON compatibility
update_position_stats(self.active_position_id, {
"hedge_pnl_realized": round(float(self.accumulated_pnl), 2),
"hedge_fees_paid": round(float(self.accumulated_fees), 2)
})
except Exception as e:
logger.error(f"Error tracking fills: {e}")
def close_all_positions(self, force_taker: bool = False):
logger.info("Closing all positions...")
try:
# 1. Cancel Orders
open_orders = self.get_open_orders()
for o in open_orders:
if o['coin'] == COIN_SYMBOL:
self.cancel_order(COIN_SYMBOL, o['oid'])
# 2. Get Position
pos_data = self.get_current_position(COIN_SYMBOL)
current_pos = pos_data['size']
if current_pos == 0: return
is_buy_to_close = current_pos < 0
# Use Decimal absolute
final_size = abs(current_pos)
# --- MAKER CLOSE ---
if not force_taker:
levels = self.get_order_book_levels(COIN_SYMBOL)
if levels:
tick_size = Decimal("0.1")
price = levels['bid'] - tick_size if is_buy_to_close else levels['ask'] + tick_size
logger.info(f"Attempting Maker Close: {final_size} @ {price}")
oid = self.place_limit_order(COIN_SYMBOL, is_buy_to_close, final_size, price, "Alo")
if oid:
logger.info(f"Close Order Placed: {oid}")
return
# --- TAKER CLOSE ---
market_price = self.get_market_price(COIN_SYMBOL)
if market_price:
# 5% slippage for guaranteed close
slip = Decimal("1.05") if is_buy_to_close else Decimal("0.95")
limit_price = market_price * slip
logger.info(f"Executing Taker Close: {final_size} @ {limit_price}")
self.place_limit_order(COIN_SYMBOL, is_buy_to_close, final_size, limit_price, "Ioc")
self.active_position_id = None
except Exception as e:
logger.error(f"Error closing positions: {e}")
def run(self):
logger.info(f"Starting Hedger Loop ({CHECK_INTERVAL}s)...")
while True:
try:
active_pos = get_active_automatic_position()
# Check Global Disable or Missing Position
if not active_pos or not active_pos.get('hedge_enabled', True):
if self.strategy is not None:
logger.info("Hedge Disabled/Missing. Closing.")
self.close_all_positions(force_taker=True)
self.strategy = None
time.sleep(CHECK_INTERVAL)
continue
# Check CLOSING status (from Manager)
if active_pos.get('status') == 'CLOSING':
logger.info(f"[ALERT] Position {active_pos['token_id']} is CLOSING. Closing Hedge.")
self.close_all_positions(force_taker=True)
self.strategy = None
time.sleep(CHECK_INTERVAL)
continue
# Initialize Strategy if needed
if self.strategy is None or self.active_position_id != active_pos['token_id']:
self._init_strategy(active_pos)
if self.strategy is None:
time.sleep(CHECK_INTERVAL)
continue
# --- CYCLE START ---
# 1. Manage Orders
if self.manage_orders():
time.sleep(CHECK_INTERVAL)
continue
# 2. Market Data
levels = self.get_order_book_levels(COIN_SYMBOL)
if not levels:
time.sleep(0.1)
continue
price = levels['mid']
pos_data = self.get_current_position(COIN_SYMBOL)
current_size = pos_data['size']
current_pnl = pos_data['pnl']
# 3. Calculate Logic
calc = self.strategy.calculate_rebalance(price, current_size)
diff_abs = abs(calc['diff'])
# 4. Thresholds
sqrt_Pa = self.strategy.low_range.sqrt()
sqrt_Pb = self.strategy.high_range.sqrt()
max_potential_eth = self.strategy.L * ((Decimal("1")/sqrt_Pa) - (Decimal("1")/sqrt_Pb))
rebalance_threshold = max(MIN_THRESHOLD_ETH, max_potential_eth * Decimal("0.05"))
# Volatility Adjustment
if self.last_price:
pct_change = abs(price - self.last_price) / self.last_price
if pct_change > Decimal("0.003"):
rebalance_threshold *= DYNAMIC_THRESHOLD_MULTIPLIER
self.last_price = price
# 5. Check Zones
# Assuming simple in-range check for now as zone logic was complex float math
# Using Strategy ranges
in_range = self.strategy.low_range <= price <= self.strategy.high_range
if not in_range:
if price > self.strategy.high_range:
logger.info(f"[OUT] ABOVE RANGE ({price:.2f}). Closing Hedge.")
self.close_all_positions(force_taker=True)
elif price < self.strategy.low_range:
if int(time.time()) % 20 == 0:
logger.info(f"[HOLD] BELOW RANGE ({price:.2f}). Holding Hedge.")
time.sleep(CHECK_INTERVAL)
continue
# 6. Execute Trade
if diff_abs > rebalance_threshold:
if time.time() - self.last_trade_time > MIN_TIME_BETWEEN_TRADES:
is_buy = (calc['action'] == "BUY")
# Taker execution for rebalance
exec_price = levels['ask'] * Decimal("1.001") if is_buy else levels['bid'] * Decimal("0.999")
logger.info(f"[TRIG] Rebalance: {calc['action']} {diff_abs:.4f} (Diff > {rebalance_threshold:.4f})")
oid = self.place_limit_order(COIN_SYMBOL, is_buy, diff_abs, exec_price, "Ioc")
if oid:
self.last_trade_time = time.time()
self.track_fills_and_pnl(force=True)
else:
logger.info(f"[WAIT] Cooldown. Diff: {diff_abs:.4f}")
else:
logger.info(f"[IDLE] Px: {price:.2f} | Diff: {diff_abs:.4f} < {rebalance_threshold:.4f} | PnL: {current_pnl:.2f}")
self.track_fills_and_pnl()
time.sleep(CHECK_INTERVAL)
except KeyboardInterrupt:
logger.info("Stopping...")
self.close_all_positions()
break
except Exception as e:
logger.error(f"Loop Error: {e}", exc_info=True)
time.sleep(5)
if __name__ == "__main__":
hedger = ScalperHedger()
hedger.run()

@@ -0,0 +1,108 @@
# Low Latency Optimization Plan: Memory Sharing Integration
## Overview
Currently, the system consists of two separate processes (`uniswap_manager_refactored.py` and `clp_hedger.py`) communicating via a file (`hedge_status.json`). This introduces inevitable latency due to:
1. **Polling Intervals:** The hedger must "sleep" and "wake up" to check the file.
2. **File I/O:** Reading/writing to disk is thousands of times slower than memory operations.
3. **Synchronization:** Potential race conditions if both try to access the file simultaneously.
## Goal
Eliminate file reliance to achieve **sub-millisecond** reaction times between "Uniswap Position Out of Range" detection and "Hedge Close" execution.
## Proposed Architecture: Unified Multi-Threaded Bot
Instead of two independent scripts, we will merge them into a single Python application running two concurrent threads that share a common data object in memory.
### Key Components
1. **`SharedState` Class (The Brain)**
* A thread-safe data structure (using `threading.Lock`) that holds the current position status, price, and range.
* **Events:** Uses `threading.Event` (e.g., `close_signal`) to allow the Manager to *instantly* wake up the Hedger without waiting for a sleep cycle to finish.
2. **`UniswapManager` Thread**
* **Role:** Monitors on-chain data (RPC).
* **Action:** When it detects "Out of Range", it updates `SharedState` and sets `close_signal.set()`.
3. **`ClpHedger` Thread**
* **Role:** Manages the Hyperliquid hedge.
* **Action:** Instead of `time.sleep(1)`, it waits on `close_signal.wait(timeout=1)`.
* **Reaction:** If `close_signal` is triggered, it executes the close logic **immediately** (0 latency).
4. **`main_bot.py` (The Entry Point)**
* Initializes `SharedState`.
* Starts `UniswapManager` and `ClpHedger` as threads.
* Handles centralized logging and clean shutdown.
## Implementation Steps
### Step 1: Create `SharedState`
Define a class that replaces the JSON file structure.
```python
class SharedState:
def __init__(self):
self.lock = threading.Lock()
self.close_event = threading.Event()
self.position_data = {} # Stores the dict formerly in JSON
def update_position(self, data):
with self.lock:
self.position_data.update(data)
def get_position(self):
with self.lock:
return self.position_data.copy()
def trigger_emergency_close(self):
self.close_event.set()
```
### Step 2: Refactor `uniswap_manager_refactored.py`
* Convert the script into a class `UniswapManager`.
* Replace all `load_status_data()` and `save_status_data()` calls with `self.shared_state.update_position(...)`.
* When "Out of Range" is detected:
```python
# Old
update_position_status(token_id, "CLOSING")
# New
self.shared_state.update_position({'status': 'CLOSING'})
self.shared_state.trigger_emergency_close() # Wakes up Hedger instantly
```
### Step 3: Refactor `clp_hedger.py`
* Convert the script into a class `ClpHedger`.
* Replace file reading logic with `self.shared_state.get_position()`.
* Update the main loop to handle the event:
```python
# Old
time.sleep(CHECK_INTERVAL)
# New
# Wait for 1 second OR immediate signal
if self.shared_state.close_event.wait(timeout=1.0):
self.close_all_positions()
self.shared_state.close_event.clear()
```
### Step 4: Create `main_bot.py`
```python
if __name__ == "__main__":
state = SharedState()
manager = UniswapManager(state)
hedger = ClpHedger(state)
t1 = threading.Thread(target=manager.run)
t2 = threading.Thread(target=hedger.run)
t1.start()
t2.start()
t1.join()
t2.join()
```
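Clean shutdown (the last responsibility listed for `main_bot.py` above) can be handled with a shared stop event; a minimal sketch, with the worker body stubbed out and all names illustrative:
```python
import threading
import time

stop_event = threading.Event()

def run_worker(step_fn, interval: float = 1.0):
    """Run one cycle at a time until shutdown is requested."""
    while not stop_event.is_set():
        step_fn()
        stop_event.wait(interval)   # doubles as the polling sleep

if __name__ == "__main__":
    t = threading.Thread(target=run_worker, args=(lambda: None,), daemon=True)
    t.start()
    try:
        while t.is_alive():
            time.sleep(0.5)
    except KeyboardInterrupt:
        stop_event.set()            # the worker exits its loop cleanly
        t.join()
```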
## Benefits
1. **Near-Zero Latency:** The moment the Manager sets the event, the Hedger wakes and reacts; there is no polling delay.
2. **Reliability:** No file corruption risks (like the JSON error experienced earlier).
3. **Efficiency:** Reduces disk I/O, extending SD card/drive life and reducing CPU usage.

@@ -0,0 +1,139 @@
# Python Blockchain Development & Review Guidelines
## Overview
This document outlines the standards for writing, reviewing, and deploying Python scripts that interact with EVM-based blockchains (Ethereum, Arbitrum, etc.). These guidelines prioritize **capital preservation**, **transaction robustness**, and **system stability**.
---
## 1. Transaction Handling & Lifecycle
*High-reliability transaction management is the core of a production bot. Never "fire and forget."*
### 1.1. Timeout & Receipt Management
- **Requirement:** Never send a transaction without immediately waiting for its receipt or tracking its hash.
- **Why:** The RPC might accept the tx, but it could be dropped from the mempool or stuck indefinitely.
- **Code Standard:**
```python
# BAD
w3.eth.send_raw_transaction(signed_txn.raw_transaction)

# GOOD
from web3.exceptions import TimeExhausted

tx_hash = w3.eth.send_raw_transaction(signed_txn.raw_transaction)
try:
    receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=120)
except TimeExhausted:
    # Handle stuck transaction (bump gas or cancel)
    handle_stuck_transaction(tx_hash)
```
### 1.2. Verification of Success
- **Requirement:** Explicitly check `receipt.status == 1`.
- **Why:** A transaction can be mined (success=True) but execution can revert (status=0).
- **Code Standard:**
```python
if receipt.status != 1:
raise TransactionRevertedError(f"Tx {tx_hash.hex()} reverted on-chain")
```
### 1.3. Gas Management & Stuck Transactions
- **Requirement:** Do not hardcode gas prices. Use dynamic estimation.
- **Mechanism:**
- For EIP-1559 chains (Arbitrum/Base/Mainnet), use `maxFeePerGas` and `maxPriorityFeePerGas`.
- Implement a "Gas Bumping" mechanism: If a tx is not mined in $X$ seconds, resubmit with 10-20% higher gas using the **same nonce**.
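A minimal sketch of such a bumping loop, assuming an EIP-1559 transaction dict and an `eth_account` local account (the 15% bump, retry count and helper name are illustrative):
```python
from web3 import Web3
from web3.exceptions import TimeExhausted

def send_with_bump(w3: Web3, account, tx: dict, attempts: int = 3, timeout: int = 60) -> str:
    """Send an EIP-1559 tx; if not mined in time, resubmit with ~15% higher fees and the SAME nonce."""
    tx.setdefault("nonce", w3.eth.get_transaction_count(account.address))
    tx_hash = None
    for _ in range(attempts):
        signed = account.sign_transaction(tx)
        tx_hash = w3.eth.send_raw_transaction(signed.raw_transaction)
        try:
            receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=timeout)
            return receipt["transactionHash"].hex()
        except TimeExhausted:
            # Replacement must reuse the nonce and pay noticeably more than the original
            tx["maxFeePerGas"] = int(tx["maxFeePerGas"] * 1.15)
            tx["maxPriorityFeePerGas"] = int(tx["maxPriorityFeePerGas"] * 1.15)
    raise RuntimeError(f"Transaction {tx_hash.hex()} still unconfirmed after {attempts} attempts")
```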
### 1.4. Nonce Management
- **Requirement:** In high-frequency loops, track the nonce locally.
- **Why:** `w3.eth.get_transaction_count(addr, 'pending')` is often slow or eventually consistent on some RPCs, leading to "Nonce too low" or "Replacement transaction underpriced" errors.
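A sketch of local nonce tracking (the class and method names are illustrative, not part of the codebase):
```python
from web3 import Web3

class NonceTracker:
    """Hand out nonces locally instead of querying the RPC before every transaction."""
    def __init__(self, w3: Web3, address: str):
        self.w3 = w3
        self.address = address
        self.nonce = w3.eth.get_transaction_count(address, "pending")

    def next(self) -> int:
        nonce = self.nonce
        self.nonce += 1
        return nonce

    def resync(self) -> None:
        """Call after 'nonce too low' / 'replacement underpriced' errors to re-align with the chain."""
        self.nonce = self.w3.eth.get_transaction_count(self.address, "pending")
```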
---
## 2. Financial Logic & Precision
### 2.1. No Floating Point Math for Token Amounts
- **Requirement:** NEVER use standard python `float` for calculating token amounts or prices involved in protocol interactions.
- **Standard:** Use `decimal.Decimal` or integer math (Wei).
- **Why:** `0.1 + 0.2 != 0.3` in floating point. This causes dust errors and "Insufficient Balance" reverts.
```python
# BAD
amount = balance * 0.5
# GOOD
amount = int(Decimal(balance) * Decimal("0.5"))
```
### 2.2. Slippage Protection
- **Requirement:** Never use `0` for `amountOutMinimum` or `sqrtPriceLimitX96` in production.
- **Standard:** Calculate expected output and apply a config-defined slippage (e.g., 0.1%).
- **Why:** Front-running and sandwich attacks will drain value from `amountOutMin: 0` trades.
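For instance, turning a quoted output into `amountOutMinimum` with a 0.1% tolerance (a minimal sketch; the quote itself would come from a quoter contract or price feed):
```python
from decimal import Decimal

def min_amount_out(expected_out: int, slippage_pct: Decimal = Decimal("0.1")) -> int:
    """Apply a configured slippage tolerance (in percent) to a quoted output in raw token units."""
    factor = (Decimal("100") - slippage_pct) / Decimal("100")
    return int(Decimal(expected_out) * factor)

# Quoted 1_000_000_000 (1,000 USDC at 6 decimals) with 0.1% tolerance -> 999_000_000
```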
### 2.3. Approval Handling
- **Requirement:** Check allowance before approving.
- **Standard:**
- Verify `allowance >= amount`.
- If `allowance < amount`, approve.
- **Note:** Some tokens (USDT) require approving `0` before approving a new amount if an allowance already exists.
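A hedged sketch of that flow; `token` is a standard ERC-20 `web3` contract object and `send_tx` is a hypothetical helper that signs, sends and waits for the receipt:
```python
def ensure_allowance(token, owner: str, spender: str, amount: int) -> None:
    """Approve only when the current allowance is insufficient."""
    allowance = token.functions.allowance(owner, spender).call()
    if allowance >= amount:
        return  # nothing to do
    if allowance > 0:
        # USDT-style tokens require resetting to 0 before setting a new allowance
        send_tx(token.functions.approve(spender, 0))
    send_tx(token.functions.approve(spender, amount))
```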
---
## 3. Security & Safety
### 3.1. Secrets Management
- **Requirement:** No private keys or mnemonics in source code.
- **Standard:** Use `.env` files (loaded via `python-dotenv`) or proper secrets managers.
- **Review Check:** `grep -rEI "0x[0-9a-fA-F]{64}" .` to ensure no raw private keys were accidentally committed.
### 3.2. Address Validation
- **Requirement:** All addresses must be checksummed before use.
- **Standard:**
```python
# Input
target_address = "0xc364..."
# Validation
if not Web3.is_address(target_address):
raise ValueError("Invalid address")
checksum_address = Web3.to_checksum_address(target_address)
```
### 3.3. Simulation (Dry Run)
- **Requirement:** For complex logic (like batch swaps), use `contract.functions.method().call()` before `.build_transaction()`.
- **Why:** If the `.call()` fails (reverts), the transaction will definitely fail. Save gas by catching logic errors off-chain.
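A minimal sketch of the dry-run pattern (the router call in the usage comment is illustrative):
```python
def simulate_then_build(contract_fn, wallet_address: str, nonce: int) -> dict:
    """Dry-run a ContractFunction via .call(); only build the real transaction if it would not revert."""
    try:
        contract_fn.call({"from": wallet_address})
    except Exception as exc:
        raise RuntimeError(f"Simulation reverted, not sending: {exc}")
    return contract_fn.build_transaction({"from": wallet_address, "nonce": nonce})

# Usage sketch:
# tx = simulate_then_build(router.functions.exactInputSingle(params), wallet_address, nonce)
```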
---
## 4. Coding Style & Observability
### 4.1. Logging
- **Requirement:** No `print()` statements. Use `logging` module.
- **Standard:**
- `INFO`: High-level state changes (e.g., "Position Opened").
- `DEBUG`: API responses, specific calc steps.
- `ERROR`: Stack traces and critical failures.
- **Traceability:** Log the Transaction Hash **immediately** upon sending, not after waiting. If the script crashes while waiting, you need the hash to check the chain manually.
### 4.2. Idempotency & State Recovery
- **Requirement:** Scripts must be restartable without double-spending.
- **Standard:** Before submitting a "Open Position" transaction, read the chain (or `hedge_status.json`) to ensure a position isn't already open.
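A small sketch of that guard against `hedge_status.json` (the file may hold a single entry or a list, matching the format the hedger reads):
```python
import json
import os

def position_already_open(status_file: str = "hedge_status.json") -> bool:
    """Restart guard: refuse to mint if a position is already recorded as OPEN."""
    if not os.path.exists(status_file):
        return False
    with open(status_file) as f:
        data = json.load(f)
    entries = data if isinstance(data, list) else [data]
    return any(entry.get("status") == "OPEN" for entry in entries)
```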
### 4.3. Type Hinting
- **Requirement:** Use Python type hints for clarity.
- **Standard:**
```python
def execute_swap(
token_in: str,
amount: int,
slippage_pct: float = 0.5
) -> str: # Returns tx_hash
```
---
## 5. Review Checklist (Copy-Paste for PRs)
- [ ] **Secrets:** No private keys in code?
- [ ] **Math:** Is `Decimal` or Integer math used for all financial calcs?
- [ ] **Slippage:** Is `amountOutMinimum` > 0?
- [ ] **Timeouts:** Does `wait_for_transaction_receipt` have a timeout?
- [ ] **Status Check:** Is `receipt.status` checked for success/revert?
- [ ] **Gas:** Are gas limits and prices dynamic/reasonable?
- [ ] **Addresses:** Are all addresses Checksummed?
- [ ] **Restartability:** What happens if the script dies halfway through?

@@ -0,0 +1,71 @@
# Uniswap Manager Workflow Documentation
This document describes the operational logic of the `uniswap_manager_refactored.py` script, specifically focusing on how it handles position lifecycle events.
## 1. Out of Range Workflow (Position Closing)
When the script detects that an active Concentrated Liquidity Position (CLP) has moved out of its defined tick range, the following sequence occurs:
1. **Detection (Monitoring Loop):** (see the code sketch after this list)
* The `main` loop runs every `MONITOR_INTERVAL_SECONDS` (default: 60s).
* It retrieves the active position's `tickLower` and `tickUpper` from on-chain data.
* It fetches the current pool `tick`.
* It determines if the position is out of range (`current_tick < tickLower` or `current_tick >= tickUpper`).
2. **Logging:**
* A warning is logged to both console and file: `🛑 Closing Position {token_id} (Out of Range)`.
3. **Status Update -> "CLOSING":**
* The `hedge_status.json` file is updated to mark the position status as `"CLOSING"`.
* **Purpose:** This signals external Hedger bots (watching this file) to halt hedging operations or close their hedges immediately.
4. **Liquidity Removal:**
* The script executes a `decreaseLiquidity` transaction on the `NonfungiblePositionManager` contract.
* It removes 100% of the liquidity, converting the position back into the underlying tokens (WETH and USDC) in the wallet.
5. **Fee Collection:**
* Immediately following liquidity removal, a `collect` transaction is sent to claim all accrued trading fees.
6. **Status Update -> "CLOSED":**
* Upon successful confirmation of the transactions, `hedge_status.json` is updated to status `"CLOSED"`.
* A `timestamp_close` is recorded.
7. **Rebalancing (Optional/Configuration Dependent):**
* The script checks the `REBALANCE_ON_CLOSE_BELOW_RANGE` flag.
* **Scenario:** If the price fell **BELOW** the range, the position is 100% WETH.
* **Action:** If implemented, the script may perform a swap (e.g., selling 50% of the WETH for USDC) to rebalance the portfolio before opening a new position.
* *Current State:* The logic identifies this condition but requires the specific swap logic implementation (currently a `pass` placeholder in the refactored script).
8. **Cycle Reset:**
* The script returns to the monitoring loop.
* In the next cycle, detecting no "OPEN" position, it will evaluate `OPEN_POSITION_ENABLED` to potentially calculate and mint a **new** position centered on the current market price.
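A minimal sketch of the detection step, assuming `web3` contract objects for the position manager (`npm`) and the pool (variable names are illustrative):
```python
def is_out_of_range(npm, pool, token_id: int) -> bool:
    """Compare the position's tick bounds with the pool's current tick."""
    # positions() -> (nonce, operator, token0, token1, fee, tickLower, tickUpper, liquidity, ...)
    pos = npm.functions.positions(token_id).call()
    tick_lower, tick_upper = pos[5], pos[6]
    # slot0() -> (sqrtPriceX96, tick, ...)
    current_tick = pool.functions.slot0().call()[1]
    return current_tick < tick_lower or current_tick >= tick_upper
```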
## 2. New Position Creation (Opening)
When no active position exists and `OPEN_POSITION_ENABLED` is `True`:
1. **Market Analysis:** (tick-range math sketched after this list)
* Fetches current pool price and tick.
* Calculates a new range (default: +/- 2.5%) centered on the current tick.
2. **Investment Calculation:**
* Uses `TARGET_INVESTMENT_VALUE_USDC` (default: $200).
* If set to `"MAX"`, it calculates the maximum affordable position based on wallet balances minus a safety buffer.
* Calculates the precise amount of Token0 and Token1 required for the target value at the current price and range.
3. **Preparation (Swap & Approve):**
* Checks wallet balances.
* **Auto-Wrap:** Wraps ETH to WETH if necessary.
* **Auto-Swap:** Swaps surplus tokens (e.g., excess USDC for WETH) to match the required ratio for the new position.
* **Approvals:** Checks and sends `approve` transactions for the Position Manager if allowances are insufficient.
4. **Minting:**
* Sends the `mint` transaction to create the new NFT position.
* Applies a slippage tolerance (default: 0.5%) to `amountMin` parameters.
5. **Status Recording:**
* On success, updates `hedge_status.json` with the new position details (Token ID, Range, Entry Price, Initial Amounts).
* Sets status to `"OPEN"`.
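A rough sketch of turning the +/- 2.5% width into tick bounds (illustrative only; the real logic must also respect token ordering, decimals and the pool's tick spacing):
```python
import math

def range_width_to_ticks(current_tick: int, width_pct: float, tick_spacing: int) -> tuple:
    """Map a +/- percentage width around the current tick to spacing-aligned tick bounds."""
    # Each tick is a 0.01% price step, so a % width maps to log(1 + pct/100) / log(1.0001) ticks.
    tick_delta = int(math.log(1 + width_pct / 100) / math.log(1.0001))
    lower = (current_tick - tick_delta) // tick_spacing * tick_spacing
    upper = -(-(current_tick + tick_delta) // tick_spacing) * tick_spacing  # round the upper bound up
    return lower, upper

# Example: range_width_to_ticks(200_000, 2.5, 10) -> approximately (199_750, 200_250)
```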
---
*Last Updated: December 19, 2025*

requirements.txt (new file)

@@ -0,0 +1,12 @@
# Core Web3 and Blockchain interaction
web3>=7.0.0
eth-account>=0.13.0
# Hyperliquid SDK for hedging
hyperliquid-python-sdk>=0.6.0
# Environment and Configuration
python-dotenv>=1.0.0
# Utility
requests>=2.31.0

@@ -0,0 +1,340 @@
# CLP Scalper Hedger Architecture and Price Range Management
## Overview
The `clp_scalper_hedger.py` is a sophisticated automated trading system designed for **delta-zero hedging** - completely eliminating directional exposure while maximizing fee generation. It monitors CLP positions and automatically executes hedges when market conditions trigger position exits from defined price ranges.
## Core Architecture
### **1. Configuration Layer**
- **Price Range Zones**: Strategic bands (Bottom, Close, Top) with different behaviors
- **Multi-Timeframe Velocity**: Calculates price momentum across different timeframes (1s, 5s, 25s)
- **Dynamic Thresholds**: Automatically adjusts protection levels based on volatility
- **Capital Safety**: Position size limits and dynamic risk management
- **Strategy States**: Normal, Overhedge, Emergency, Velocity-based
### **2. Price Monitoring & Detection**
The system constantly monitors current prices and compares them against position parameters:
#### **Range Calculation Logic** (Lines 742-830):
```python
# Check Range
is_out_of_range = False
status_str = "IN RANGE"
if current_tick < pos_details['tickLower']:
is_out_of_range = True
status_str = "OUT OF RANGE (BELOW)"
elif current_tick >= pos_details['tickUpper']:
is_out_of_range = True
status_str = "OUT OF RANGE (ABOVE)"
```
**Key Variables:**
- `current_tick`: Current pool tick from Uniswap V3
- `pos_details['tickLower']` and `pos_details['tickUpper']`: Position boundaries
- `is_out_of_range`: Boolean flag determining if position needs action
#### **Automatic Close Trigger** (Lines 764-770):
```python
if pos_type == 'AUTOMATIC' and CLOSE_POSITION_ENABLED and is_out_of_range:
logger.warning(f"⚠️ CLOSE TRIGGERED: Position {token_id} OUT OF RANGE | Delta-Zero hedge unwind required")
```
**Configuration Control:**
- `CLOSE_POSITION_ENABLED = True`: Enable automatic closing
- `CLOSE_IF_OUT_OF_RANGE_ONLY = True`: Close only when out of range
- `REBALANCE_ON_CLOSE_BELOW_RANGE = True`: Rebalance 50% WETH→USDC on below-range closes
### **3. Zone-Based Edge Protection**
The system divides the price space into **three strategic zones**:
#### **Zone Configuration** (Lines 801-910):
```python
# Bottom Hedge Zone: 0.0-1.5% (Always Active)
ZONE_BOTTOM_HEDGE_LIMIT = 1 # Disabled for testing
ZONE_CLOSE_START = 10.0
ZONE_CLOSE_END = 11.0
# Top Hedge Zone: Disabled by default
ZONE_TOP_HEDGE_START = 10.0
ZONE_TOP_HEDGE_END = 11.0
```
#### **Dynamic Price Buffer** (Lines 370-440):
```python
def get_dynamic_price_buffer(self):
if not MOMENTUM_ADJUSTMENT_ENABLED:
return PRICE_BUFFER_PCT
current_price = self.last_price if self.last_price else 0.0
momentum_pct = self.get_price_momentum_pct(current_price)
base_buffer = PRICE_BUFFER_PCT
# Adjust buffer based on momentum and position direction
if self.original_order_side == "BUY":
if momentum_pct > 0.002: # Strong upward momentum
dynamic_buffer = base_buffer * 2.0
        elif momentum_pct < -0.002:  # Strong downward momentum
            dynamic_buffer = base_buffer * 1.5
        else:  # Neutral / mild momentum
            dynamic_buffer = base_buffer
elif self.original_order_side == "SELL":
if momentum_pct < -0.002: # Strong downward momentum
dynamic_buffer = base_buffer * 2.0
else: # Neutral or upward momentum
dynamic_buffer = base_buffer
return min(dynamic_buffer, MAX_PRICE_BUFFER_PCT)
```
### **4. Multi-Timeframe Velocity Analysis**
#### **Velocity Calculation** (Lines 1002-1089):
The system tracks price movements across multiple timeframes to detect market momentum and adjust protection thresholds:
```python
def get_price_momentum_pct(self, current_price):
    # Momentum over the last few sampled prices (as a fraction, e.g. 0.01 = 1%)
    if not hasattr(self, 'price_momentum_history') or not self.price_momentum_history:
        return 0.0
    recent_prices = self.price_momentum_history[-5:]
    if len(recent_prices) < 2:
        return 0.0
    # Immediate velocity (change versus the most recent ~1s sample)
    velocity_1s = (current_price - recent_prices[-1]) / recent_prices[-1]
    # Smoothed velocity (average change versus the last few samples, ~5s window)
    velocity_5s_avg = sum((current_price - p) / p for p in recent_prices) / len(recent_prices)
    # Choose the velocity signal based on market conditions
    if abs(velocity_1s) > 0.005:          # Strong momentum: use the immediate change
        price_velocity = velocity_1s
    elif abs(velocity_5s_avg) > 0.002:    # Moderate momentum: use the smoothed average
        price_velocity = velocity_5s_avg
    else:
        price_velocity = 0.0              # Negligible momentum
    return price_velocity
```
### **5. Advanced Strategy Logic**
#### **Position Zone Awareness** (Lines 784-850):
```python
# Active Position Zone Check
in_hedge_zone = (price >= clp_low_range and price <= clp_high_range)
```
#### **Dynamic Threshold Calculation** (Lines 440-500):
```python
# Dynamic multiplier based on position value
dynamic_threshold_multiplier = 1.0  # Baseline multiplier; raised when volatility increases
dynamic_threshold = min(dynamic_threshold, target_value / DYNAMIC_THRESHOLD_MULTIPLIER)
```
#### **Enhanced Edge Detection** (Lines 508-620):
```python
# Multi-factor edge detection with zone context
distance_from_bottom = ((current_price - position['range_lower']) / range_width) * 100
distance_from_top = ((position['range_upper'] - current_price) / range_width) * 100
edge_proximity_pct = min(distance_from_bottom, distance_from_top) if range_width > 0 else 0
```
### **6. Real-Time Market Integration**
#### **Live Price Feeds** (Lines 880-930):
```python
# Initialize price tracking
self.last_price = None
self.last_price_for_velocity = None
self.price_momentum_history = []
self.velocity_history = []
```
### **7. Order Management System**
#### **Precision Trading** (Lines 923-1100):
```python
# High-precision decimal arithmetic
from decimal import Decimal, getcontext, ROUND_DOWN, ROUND_HALF_UP
def safe_decimal_from_float(value):
if value is None:
return Decimal('0')
return Decimal(str(value))
def validate_trade_size(size, sz_decimals, min_order_value=10.0, price=3000.0):
"""Validate trade size meets minimum requirements"""
if size <= 0:
return 0.0
rounded_size = round_to_sz_decimals_precise(size, sz_decimals)
order_value = rounded_size * price
if order_value < min_order_value:
return 0.0
    return rounded_size
```
## 7. Comprehensive Zone Management
### **Active Zone Protection** (Always Active - 100%):
- **Close Zone** (disabled by default, 0%): Would activate as the price approaches the lower bound
- **Top Zone** (disabled by default, 0%): Not currently used
### **Multi-Strategy Support** (Configurable):
- **Conservative**: Risk-averse with tight ranges
- **Balanced**: Moderate risk with standard ranges
- **Aggressive**: Risk-tolerant with wide ranges
### **8. Emergency Protections**
#### **Capital Safety Limits**:
- **MIN_ORDER_VALUE_USD**: $10 minimum trade size
- **MAX_HEDGE_MULTIPLIER**: 2.8x leverage limit
- **LARGE_HEDGE_MULTIPLIER**: Emergency 2.8x multiplier for large gaps
### **9. Performance Optimizations**
#### **Smart Order Routing**:
- **Maker/Taker**: Passive vs. aggressive order placement
- **Price Impact Analysis**: Avoids excessive slippage
- **Fill Probability**: Optimizes order placement for high fill rates
## 10. Price Movement Examples
### **Price Increase Detection:**
1. **Normal Uptrend** (+2% over 10s): Zone expansion, normal hedge sizing
2. **Sharp Rally** (+8% over 5s): Zone expansion, aggressive hedging
3. **Crash Drop** (-15% over 1s): Emergency hedge, zone protection bypass
4. **Gradual Recovery** (+1% over 25s): Systematic position reduction
### **Zone Transition Events:**
1. **Entry Zone Crossing**: Price moves from inactive → active zone
2. **Active Zone Optimization**: Rebalancing within active zone
3. **Exit Zone Crossing**: Position closing as price exits active zone
## Key Configuration Parameters
```python
# Core Settings (Lines 20-120)
COIN_SYMBOL = "ETH"
CHECK_INTERVAL = 1 # Optimized for high-frequency monitoring
LEVERAGE = 5 # 5x cross leverage for delta-zero hedging
STATUS_FILE = "hedge_status.json"
# Price Zones (Lines 160-250)
BOTTOM_HEDGE_LIMIT = 0.0 # Bottom zone always active (0-1.5% range)
ZONE_CLOSE_START = 10.0 # Close zone activation point (1.0%)
ZONE_CLOSE_END = 11.0 # Close zone deactivation point (11.0%)
TOP_HEDGE_START = 10.0 # Top zone activation point (10.0%)
TOP_HEDGE_END = 11.0 # Top zone deactivation point (11.0%)
# Strategy Zones (Lines 251-350)
STRATEGY_BOTTOM_ZONE = 0.0 # 0% - 1.5% (conservative)
STRATEGY_CLOSE_ZONE = 0.0 # 1.0% - 0.5% (moderate)
STRATEGY_TOP_ZONE = 0.0 # Disabled (aggressive)
STRATEGY_ACTIVE_ZONE = 1.25 # 1.25% - 2.5% (enhanced active)
# Edge Protection (Lines 370-460)
EDGE_PROXIMITY_PCT = 0.05 # 5% range edge proximity for triggering
VELOCITY_THRESHOLD_PCT = 0.005 # 0.5% velocity threshold for emergency
POSITION_OPEN_EDGE_PROXIMITY_PCT = 0.07 # 7% edge proximity for position monitoring
POSITION_CLOSED_EDGE_PROXIMITY_PCT = 0.025 # 2.5% edge proximity for closed positions
# Capital Safety (Lines 460-500)
MIN_THRESHOLD_ETH = 0.12 # Minimum hedge rebalance size in ETH
MIN_ORDER_VALUE_USD = 10.0 # Minimum $10 USD trade value
DYNAMIC_THRESHOLD_MULTIPLIER = 1.3 # Dynamic threshold adjustment
LARGE_HEDGE_MULTIPLIER = 2.0 # 2x multiplier for large movements
# Velocity Monitoring (Lines 1000-1089)
VELOCITY_WINDOW_SHORT = 5 # 5-second velocity window
VELOCITY_WINDOW_MEDIUM = 25 # 25-second velocity window
VELOCITY_WINDOW_LONG = 100 # 100-second velocity window
# Multi-Timeframe Options (Lines 1090-1120)
VELOCITY_TIMEFRAMES = [1, 5, 25, 100] # 1s, 5s, 25s, 100s
```
## 11. Operation Flow Examples
### **Normal Range Operations:**
```python
# Price: $3200 (IN RANGE - Active Zone 1.25%)
# Action: Normal hedge sizing, maintain position
# Status: "IN RANGE | ACTIVE ZONE"
# Price: $3150 (OUT OF RANGE BELOW - Close Zone)
# Action: Emergency hedge unwind, position closure
# Status: "OUT OF RANGE (BELOW) | CLOSING"
# Price: $3250 (OUT OF RANGE ABOVE - Emergency Close)
# Action: Immediate liquidation, velocity-based sizing
# Status: "OUT OF RANGE (ABOVE) | EMERGENCY CLOSE"
```
## 12. Advanced Configuration Examples
### **Conservative Strategy**:
```python
# Risk management with tight zones
STRATEGY_BOTTOM_ZONE = 0.0 # 0% - 1.5% (very tight range)
STRATEGY_ACTIVE_ZONE = 0.5 # 0.5% - 0.5% (moderate active zone)
STRATEGY_TOP_ZONE = 0.0 # Disabled (too risky)
```
### **Balanced Strategy**:
```python
# Standard risk management
STRATEGY_BOTTOM_ZONE = 0.0 # 0% - 1.5% (tight range)
STRATEGY_ACTIVE_ZONE = 1.0 # 1.0% - 1.5% (moderate active zone)
STRATEGY_TOP_ZONE = 0.0 # 0.0% - 1.5% (moderate active zone)
```
### **Aggressive Strategy**:
```python
# High-performance with wider zones
STRATEGY_BOTTOM_ZONE = 0.0 # 0% - 1.5% (tight for safety)
STRATEGY_ACTIVE_ZONE = 1.5 # 1.5% - 1.5% (enhanced active zone)
STRATEGY_TOP_ZONE = 1.5 # 1.5% - 1.5% (enabled top zone for scaling)
```
## 13. Monitoring and Logging
### **Real-Time Status Dashboard**:
The system provides comprehensive logging for:
- **Zone transitions**: When positions enter/exit zones
- **Velocity events**: Sudden price movements
- **Hedge executions**: All automated hedging activities
- **Performance metrics**: Fill rates, slippage, profit/loss
- **Risk alerts**: Position size limits, emergency triggers
## 14. Key Benefits
### **Risk Management:**
- **Capital Protection**: Hard limits prevent over-leveraging
- **Edge Awareness**: Multi-factor detection prevents surprise losses
- **Volatility Protection**: Dynamic thresholds adapt to market conditions
- **Position Control**: Precise management of multiple simultaneous positions
### **Fee Generation:**
- **Range Trading**: Positions generate fees while price ranges
- **Delta-Neutral**: System eliminates directional bias
- **High Frequency**: More opportunities for fee collection
### **Automated Operation:**
- **24/7 Monitoring**: Continuous market surveillance
- **Immediate Response**: Fast reaction to price changes
- **No Manual Intervention**: System handles all hedging automatically
This sophisticated system transforms the simple CLP model into a fully-automated delta-zero hedging machine with enterprise-grade risk management and performance optimization capabilities.

tools/README_GIT_AGENT.md (new file)

@@ -0,0 +1,262 @@
# Git Agent for Uniswap Auto CLP
## Overview
Automated backup and version control system for your Uniswap Auto CLP trading bot.
## Quick Setup
### 1. Initialize Repository
```bash
# Navigate to project directory
cd K:\Projects\uniswap_auto_clp
# Create initial commit
python tools\git_agent.py --init
# Add and push initial setup
git add .
git commit -m "🎯 Initial commit: Uniswap Auto CLP system"
git remote add origin https://git.kapuscinski.pl/ditus/uniswap_auto_clp.git
git push -u origin main
```
### 2. Create First Backup
```bash
# Test backup creation
python tools\git_agent.py --backup
```
### 3. Check Status
```bash
# View current status
python tools\git_agent.py --status
```
## Configuration
Edit `tools/agent_config.json` as needed:
```json
{
"backup": {
"enabled": true,
"frequency_hours": 1,
"keep_max_count": 100,
"push_to_remote": true
}
}
```
## Usage Commands
### Manual Operations
```bash
# Create backup now
python tools\git_agent.py --backup
# Check status
python tools\git_agent.py --status
# Cleanup old backups
python tools\git_agent.py --cleanup
# Initialize repository (one-time)
python tools\git_agent.py --init
```
### Automated Scheduling
#### Windows Task Scheduler
```powershell
# Create hourly task
schtasks /create /tn "Git Backup" /tr "python tools\git_agent.py --backup" /sc hourly
```
#### Linux Cron (if needed)
```bash
# Add to crontab
0 * * * * cd /path/to/project && python tools/git_agent.py --backup
```
## How It Works
### Branch Strategy
- **main branch**: Your manual development (you control pushes)
- **backup-* branches**: Automatic hourly backups (agent managed)
### Backup Process
1. **Hourly**: Agent checks for file changes
2. **Creates backup branch**: Named `backup-YYYY-MM-DD-HH`
3. **Commits changes**: With detailed file and parameter tracking
4. **Pushes to remote**: Automatic backup to Gitea
5. **Cleans up**: Keeps only last 100 backups
### Backup Naming
```
backup-2025-01-15-14 # 2 PM backup on Jan 15, 2025
backup-2025-01-15-15 # 3 PM backup
backup-2025-01-15-16 # 4 PM backup
```
### Commit Messages
Agent creates detailed commit messages showing:
- Files changed with status icons
- Parameter changes with percentage differences
- Security validation confirmation
- Timestamp and backup number
## Security
### What's Excluded
✅ Private keys and tokens (`.env` files)
✅ Log files (`*.log`)
✅ State files (`hedge_status.json`)
✅ Temporary files
### What's Included
✅ All code changes
✅ Configuration modifications
✅ Documentation updates
✅ Parameter tracking
## Emergency Recovery
### Quick Rollback
```bash
# List recent backups
python tools\git_agent.py --status
# Return to the main branch
git checkout main
# Copy files from the backup into main
git checkout backup-2025-01-15-14 -- .
git commit -m "🔄 Emergency restore from backup-2025-01-15-14"
git push origin main
```
### File Recovery
```bash
# Restore specific file from backup
git checkout backup-2025-01-15-14 -- path/to/file.py
```
## Monitoring
### Backup Health
```bash
# Check backup count and status
python tools\git_agent.py --status
# Expected output:
# 📊 Git Agent Status:
# Current Branch: main
# Backup Count: 47
# Has Changes: false
# Remote Connected: true
# Last Backup: backup-2025-01-15-16
```
### Manual Cleanup
```bash
# Remove old backups (keeps last 100)
python tools\git_agent.py --cleanup
```
## Troubleshooting
### Common Issues
#### "Configuration file not found"
```bash
# Ensure agent_config.json exists in tools/ directory
ls tools/agent_config.json
```
#### "Git command failed"
```bash
# Check Git installation and repository status
git status
git --version
```
#### "Remote connection failed"
```bash
# Verify Gitea URL and credentials
git remote -v
ping git.kapuscinski.pl
```
### Debug Mode
Edit `agent_config.json`:
```json
{
"logging": {
"enabled": true,
"log_level": "DEBUG"
}
}
```
Then check `git_agent.log` in project root.
## Integration with Trading Bot
### Parameter Changes
Agent automatically tracks changes to:
- `TARGET_INVESTMENT_VALUE_USDC`
- `RANGE_WIDTH_PCT`
- `SLIPPAGE_TOLERANCE`
- `LEVERAGE`
- `CHECK_INTERVAL`
- `PRICE_BUFFER_PCT`
### Backup Triggers
Consider manual backups when:
- Changing trading strategy parameters
- Updating risk management settings
- Before major system changes
- After successful backtesting
```bash
# Manual backup before important changes
python tools\git_agent.py --backup
```
## Best Practices
### Development Workflow
1. **Work on main branch** for normal development
2. **Manual commits** for your changes
3. **Agent handles backups** automatically
4. **Manual push** to main when ready
### Backup Management
- **100 backup limit** = ~4 days of hourly coverage
- **Automatic cleanup** maintains repository size
- **Remote storage** provides offsite backup
### Security Reminders
- **Never commit private keys** (automatically excluded)
- **Check .gitignore** if adding sensitive files
- **Review backup commits** for accidental secrets
## Support
### Log Files
- `git_agent.log`: Agent activity and errors
- Check logs for troubleshooting issues
### Repository Structure
```
tools/
├── git_agent.py # Main automation script
├── agent_config.json # Configuration settings
├── git_utils.py # Git operations
├── backup_manager.py # Backup branch logic
├── change_detector.py # Change analysis
├── cleanup_manager.py # Backup rotation
└── commit_formatter.py # Message formatting
```
This automated backup system ensures your trading bot code is always versioned and recoverable, while keeping your main development workflow clean and manual.

tools/agent_config.json (new file)

@@ -0,0 +1,35 @@
{
"gitea": {
"server_url": "https://git.kapuscinski.pl",
"username": "ditus",
"repository": "uniswap_auto_clp",
"token": "b24fc3203597b2bdcb2f2da6634c618"
},
"backup": {
"enabled": true,
"frequency_hours": 1,
"branch_prefix": "backup-",
"push_to_remote": true,
"keep_max_count": 100,
"cleanup_with_backup": true,
"detailed_commit_messages": true
},
"main_branch": {
"manual_pushes_only": true,
"auto_commits": false,
"protect_from_agent": true,
"name": "main"
},
"change_tracking": {
"method": "commit_message",
"include_file_diffs": true,
"track_parameter_changes": true,
"format": "detailed",
"security_validation": false
},
"logging": {
"enabled": true,
"log_file": "git_agent.log",
"log_level": "INFO"
}
}

tools/backup_manager.py (new file)

@@ -0,0 +1,89 @@
#!/usr/bin/env python3
"""
Backup Manager for Git Agent
Handles backup branch creation and management
"""
import os
import subprocess
import logging
from datetime import datetime, timezone
from typing import Dict, Any
class BackupManager:
"""Manages backup branch operations"""
def __init__(self, config: Dict[str, Any], logger: logging.Logger):
self.config = config
self.logger = logger
self.backup_config = config.get('backup', {})
self.prefix = self.backup_config.get('branch_prefix', 'backup-')
def create_backup_branch(self) -> str:
"""Create a new backup branch with timestamp"""
timestamp = datetime.now(timezone.utc)
branch_name = f"{self.prefix}{timestamp.strftime('%Y-%m-%d-%H')}"
# Get current directory from git utils
current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Create backup branch
import subprocess
try:
# Create and checkout new branch
result = subprocess.run(
['git', 'checkout', '-b', branch_name],
cwd=current_dir,
capture_output=True,
text=True,
check=False
)
if result.returncode == 0:
self.logger.info(f"✅ Created backup branch: {branch_name}")
return branch_name
else:
# Branch might already exist, just checkout
result = subprocess.run(
['git', 'checkout', branch_name],
cwd=current_dir,
capture_output=True,
text=True,
check=False
)
if result.returncode == 0:
self.logger.info(f"✅ Using existing backup branch: {branch_name}")
return branch_name
else:
self.logger.error(f"❌ Failed to create/checkout backup branch: {result.stderr}")
return None
except Exception as e:
self.logger.error(f"❌ Exception creating backup branch: {e}")
return None
def get_backup_count(self) -> int:
"""Get current number of backup branches"""
current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
result = subprocess.run(
['git', 'branch', '-a'],
cwd=current_dir,
capture_output=True,
text=True,
check=False
)
if result.returncode == 0:
branches = result.stdout.strip().split('\n')
backup_branches = [
b.strip().replace('* ', '').replace('remotes/origin/', '')
for b in branches
if b.strip() and self.prefix in b
]
return len(backup_branches)
except Exception as e:
self.logger.error(f"❌ Error counting backup branches: {e}")
return 0
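A hedged usage sketch for the class above (it assumes being run from `tools/` with `agent_config.json` alongside):
```python
# Sketch: create (or reuse) the current hour's backup branch and report the backup count.
import json
import logging
from backup_manager import BackupManager

logging.basicConfig(level=logging.INFO)
with open("agent_config.json") as f:
    config = json.load(f)

manager = BackupManager(config, logging.getLogger("git_agent"))
branch = manager.create_backup_branch()  # e.g. "backup-2025-12-19-20", or None on failure
print(branch, manager.get_backup_count())
```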

230
tools/change_detector.py Normal file
View File

@ -0,0 +1,230 @@
#!/usr/bin/env python3
"""
Change Detector for Git Agent
Detects and analyzes file changes for detailed commit messages
"""
import os
import re
import subprocess
import logging
from typing import Dict, Any, List
from decimal import Decimal
class ChangeDetector:
"""Detects and categorizes file changes"""
def __init__(self, config: Dict[str, Any], logger: logging.Logger):
self.config = config
self.logger = logger
self.project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def detect_changes(self) -> Dict[str, Any]:
"""Detect all changes in the repository"""
try:
# Get changed files
changed_files = self._get_changed_files()
if not changed_files:
return {
'has_changes': False,
'files': [],
'categories': {},
'parameter_changes': {}
}
# Analyze changes
file_details = []
categories = {
'python': [],
'config': [],
'docs': [],
'other': []
}
parameter_changes = {}
for file_path in changed_files:
details = self._analyze_file_changes(file_path)
file_details.append(details)
# Categorize file
category = self._categorize_file(file_path)
categories[category].append(details)
# Track parameter changes for Python files
if category == 'python':
params = self._extract_parameter_changes(file_path, details.get('diff', ''))
if params:
parameter_changes[file_path] = params
return {
'has_changes': True,
'files': file_details,
'categories': categories,
'parameter_changes': parameter_changes
}
except Exception as e:
self.logger.error(f"❌ Error detecting changes: {e}")
return {
'has_changes': False,
'files': [],
'categories': {},
'parameter_changes': {},
'error': str(e)
}
def _get_changed_files(self) -> List[str]:
"""Get list of changed files using git status"""
try:
result = subprocess.run(
['git', 'status', '--porcelain'],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
if result.returncode != 0:
return []
files = []
for line in result.stdout.strip().split('\n'):
if line.strip():
# Extract filename (remove status codes)
filename = line.strip()[2:].strip() if len(line.strip()) > 2 else line.strip()
if filename and filename not in ['.git', '__pycache__']:
files.append(filename)
return files
except Exception as e:
self.logger.error(f"Error getting changed files: {e}")
return []
def _analyze_file_changes(self, file_path: str) -> Dict[str, Any]:
"""Analyze changes for a specific file"""
try:
# Get diff
result = subprocess.run(
['git', 'diff', '--', file_path],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
diff = result.stdout if result.returncode == 0 else ''
# Get file status
status_result = subprocess.run(
['git', 'status', '--porcelain', '--', file_path],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
status = 'modified'
if status_result.returncode == 0 and status_result.stdout.strip():
# git status --porcelain prefixes each entry with a two-character status code
status_code = status_result.stdout.strip()[:2]
if status_code == '??':
status = 'untracked'
elif 'A' in status_code:
status = 'added'
elif 'D' in status_code:
status = 'deleted'
# Count lines changed
lines_added = diff.count('\n+') - diff.count('\n++') # Exclude +++ indicators
lines_deleted = diff.count('\n-') - diff.count('\n--') # Exclude --- indicators
return {
'path': file_path,
'status': status,
'lines_added': max(0, lines_added),
'lines_deleted': max(0, lines_deleted),
'diff': diff
}
except Exception as e:
self.logger.error(f"Error analyzing {file_path}: {e}")
return {
'path': file_path,
'status': 'error',
'lines_added': 0,
'lines_deleted': 0,
'diff': '',
'error': str(e)
}
def _categorize_file(self, file_path: str) -> str:
"""Categorize file type"""
if file_path.endswith('.py'):
return 'python'
elif file_path.endswith(('.json', '.yaml', '.yml', '.toml', '.ini')):
return 'config'
elif file_path.endswith(('.md', '.txt', '.rst')):
return 'docs'
else:
return 'other'
def _extract_parameter_changes(self, file_path: str, diff: str) -> Dict[str, Any]:
"""Extract parameter changes from Python files"""
if not diff or not file_path.endswith('.py'):
return {}
parameters = {}
# Common trading bot parameters to track
param_patterns = {
'TARGET_INVESTMENT_VALUE_USDC': r'(TARGET_INVESTMENT_VALUE_USDC)\s*=\s*(\d+)',
'RANGE_WIDTH_PCT': r'(RANGE_WIDTH_PCT)\s*=\s*Decimal\("([^"]+)"\)',
'SLIPPAGE_TOLERANCE': r'(SLIPPAGE_TOLERANCE)\s*=\s*Decimal\("([^"]+)"\)',
'LEVERAGE': r'(LEVERAGE)\s*=\s*(\d+)',
'MIN_THRESHOLD_ETH': r'(MIN_THRESHOLD_ETH)\s*=\s*Decimal\("([^"]+)"\)',
'CHECK_INTERVAL': r'(CHECK_INTERVAL)\s*=\s*(\d+)',
'PRICE_BUFFER_PCT': r'(PRICE_BUFFER_PCT)\s*=\s*Decimal\("([^"]+)"\)'
}
for param_name, pattern in param_patterns.items():
matches = re.findall(pattern, diff)
if matches:
# Find old and new values
values = []
for match in matches:
if isinstance(match, tuple):
values.append(match[1] if len(match) > 1 else match[0])
else:
values.append(match)
if len(values) >= 2:
old_val = values[0]
new_val = values[-1] # Last value is current
# Calculate percentage change for numeric values
try:
if '.' in old_val or '.' in new_val:
old_num = float(old_val)
new_num = float(new_val)
if old_num != 0:
pct_change = ((new_num - old_num) / abs(old_num)) * 100
else:
pct_change = 0
else:
old_num = int(old_val)
new_num = int(new_val)
if old_num != 0:
pct_change = ((new_num - old_num) / abs(old_num)) * 100
else:
pct_change = 0
except (ValueError, ZeroDivisionError):
pct_change = 0
parameters[param_name] = {
'old': old_val,
'new': new_val,
'pct_change': round(pct_change, 1)
}
return parameters
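A hedged usage sketch for the detector (same assumptions as above: run from `tools/` with `agent_config.json` present); the inline comment shows the percentage-change arithmetic used by `_extract_parameter_changes`:
```python
# Sketch: detect working-tree changes and print any tracked parameter edits.
import json
import logging
from change_detector import ChangeDetector

with open("agent_config.json") as f:
    config = json.load(f)

detector = ChangeDetector(config, logging.getLogger("git_agent"))
changes = detector.detect_changes()
if changes["has_changes"]:
    # Example: TARGET_INVESTMENT_VALUE_USDC edited from 200 to 250 is reported as
    # pct_change = ((250 - 200) / 200) * 100 = +25.0
    for path, params in changes["parameter_changes"].items():
        print(path, params)
```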

153
tools/cleanup_manager.py Normal file
View File

@ -0,0 +1,153 @@
#!/usr/bin/env python3
"""
Cleanup Manager for Git Agent
Manages backup branch rotation (keep last 100)
"""
import os
import subprocess
import logging
from typing import Dict, Any, List
class CleanupManager:
"""Manages backup branch cleanup and rotation"""
def __init__(self, config: Dict[str, Any], logger: logging.Logger):
self.config = config
self.logger = logger
self.backup_config = config.get('backup', {})
self.prefix = self.backup_config.get('branch_prefix', 'backup-')
self.max_backups = self.backup_config.get('keep_max_count', 100)
self.project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def cleanup_old_backups(self) -> bool:
"""Clean up old backup branches to keep only the last N"""
try:
# Get all backup branches
backup_branches = self._get_backup_branches()
if len(backup_branches) <= self.max_backups:
self.logger.info(f"✅ Backup count ({len(backup_branches)}) within limit ({self.max_backups})")
return False # No cleanup needed
# Branches to delete (oldest ones)
branches_to_delete = backup_branches[self.max_backups:]
if not branches_to_delete:
return False
self.logger.info(f"🧹 Cleaning up {len(branches_to_delete)} old backup branches")
deleted_count = 0
for branch in branches_to_delete:
# Delete local branch
if self._delete_local_branch(branch):
# Delete remote branch
if self._delete_remote_branch(branch):
deleted_count += 1
self.logger.debug(f" ✅ Deleted: {branch}")
else:
self.logger.warning(f" ⚠️ Local deleted, remote failed: {branch}")
else:
self.logger.warning(f" ❌ Failed to delete: {branch}")
if deleted_count > 0:
self.logger.info(f"✅ Cleanup completed: deleted {deleted_count} old backup branches")
return True
else:
self.logger.warning("⚠️ No branches were successfully deleted")
return False
except Exception as e:
self.logger.error(f"❌ Cleanup failed: {e}")
return False
def _get_backup_branches(self) -> List[str]:
"""Get all backup branches sorted by timestamp (newest first)"""
try:
result = subprocess.run(
['git', 'branch', '-a'],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
if result.returncode != 0:
return []
branches = []
for line in result.stdout.strip().split('\n'):
if line.strip():
# Clean up branch name
branch = line.strip().replace('* ', '').replace('remotes/origin/', '')
if branch.startswith(self.prefix):
branches.append(branch)
# Sort by timestamp (extract from branch name)
# Format: backup-YYYY-MM-DD-HH
branches.sort(key=lambda x: x.replace(self.prefix, ''), reverse=True)
return branches
except Exception as e:
self.logger.error(f"Error getting backup branches: {e}")
return []
def _delete_local_branch(self, branch_name: str) -> bool:
"""Delete local branch"""
try:
result = subprocess.run(
['git', 'branch', '-D', branch_name],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
if result.returncode == 0:
return True
else:
self.logger.debug(f"Local delete failed for {branch_name}: {result.stderr}")
return False
except Exception as e:
self.logger.error(f"Exception deleting local branch {branch_name}: {e}")
return False
def _delete_remote_branch(self, branch_name: str) -> bool:
"""Delete remote branch"""
try:
result = subprocess.run(
['git', 'push', 'origin', '--delete', branch_name],
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
if result.returncode == 0:
return True
else:
# Might already be deleted remotely, that's ok
if "not found" in result.stderr.lower() or "does not exist" in result.stderr.lower():
return True
self.logger.debug(f"Remote delete failed for {branch_name}: {result.stderr}")
return False
except Exception as e:
self.logger.error(f"Exception deleting remote branch {branch_name}: {e}")
return False
def get_cleanup_stats(self) -> Dict[str, Any]:
"""Get statistics about backup cleanup"""
backup_branches = self._get_backup_branches()
current_count = len(backup_branches)
return {
'current_backup_count': current_count,
'max_allowed': self.max_backups,
'cleanup_needed': current_count > self.max_backups,
'branches_to_delete': max(0, current_count - self.max_backups),
'newest_backup': backup_branches[0] if backup_branches else None,
'oldest_backup': backup_branches[-1] if backup_branches else None
}
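A hedged usage sketch for the rotation logic (same `tools/` assumptions as the earlier sketches):
```python
# Sketch: inspect rotation state, then clean up only if the limit is exceeded.
import json
import logging
from cleanup_manager import CleanupManager

with open("agent_config.json") as f:
    config = json.load(f)

cleaner = CleanupManager(config, logging.getLogger("git_agent"))
stats = cleaner.get_cleanup_stats()
print(stats)  # counts, newest/oldest backup branch, whether cleanup is needed
if stats["cleanup_needed"]:
    cleaner.cleanup_old_backups()  # deletes the oldest local and remote backup branches
```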

325
tools/collect_fees_v2.py Normal file
View File

@ -0,0 +1,325 @@
#!/usr/bin/env python3
"""
Fee Collection & Position Recovery Script
Collects all accumulated fees from Uniswap V3 positions
Usage:
python collect_fees_v2.py
"""
import os
import sys
import json
import time
import argparse
# Required libraries
try:
from web3 import Web3
from eth_account import Account
except ImportError as e:
print(f"[ERROR] Missing required library: {e}")
print("Please install with: pip install web3 eth-account python-dotenv")
sys.exit(1)
try:
from dotenv import load_dotenv
except ImportError:
print("[WARNING] python-dotenv not found, using environment variables directly")
def load_dotenv(override=True):
pass
def setup_logging():
"""Setup logging for fee collection"""
import logging
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s',
handlers=[
logging.StreamHandler(),
logging.FileHandler('collect_fees.log', encoding='utf-8')
]
)
return logging.getLogger(__name__)
logger = setup_logging()
# --- Contract ABIs ---
NONFUNGIBLE_POSITION_MANAGER_ABI = json.loads('''
[
{"inputs": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}], "name": "positions", "outputs": [{"internalType": "uint96", "name": "nonce", "type": "uint96"}, {"internalType": "address", "name": "operator", "type": "address"}, {"internalType": "address", "name": "token0", "type": "address"}, {"internalType": "address", "name": "token1", "type": "address"}, {"internalType": "uint24", "name": "fee", "type": "uint24"}, {"internalType": "int24", "name": "tickLower", "type": "int24"}, {"internalType": "int24", "name": "tickUpper", "type": "int24"}, {"internalType": "uint128", "name": "liquidity", "type": "uint128"}, {"internalType": "uint256", "name": "feeGrowthInside0LastX128", "type": "uint256"}, {"internalType": "uint256", "name": "feeGrowthInside1LastX128", "type": "uint256"}, {"internalType": "uint128", "name": "tokensOwed0", "type": "uint128"}, {"internalType": "uint128", "name": "tokensOwed1", "type": "uint128"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"components": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}, {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint128", "name": "amount0Max", "type": "uint128"}, {"internalType": "uint128", "name": "amount1Max", "type": "uint128"}], "internalType": "struct INonfungiblePositionManager.CollectParams", "name": "params", "type": "tuple"}], "name": "collect", "outputs": [{"internalType": "uint256", "name": "amount0", "type": "uint256"}, {"internalType": "uint256", "name": "amount1", "type": "uint256"}], "stateMutability": "payable", "type": "function"}
]
''')
ERC20_ABI = json.loads('''
[
{"inputs": [], "name": "decimals", "outputs": [{"internalType": "uint8", "name": "", "type": "uint8"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "symbol", "outputs": [{"internalType": "string", "name": "", "type": "string"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"internalType": "address", "name": "account", "type": "address"}], "name": "balanceOf", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}
]
''')
def load_status_file():
"""Load hedge status file"""
status_file = "hedge_status.json"
if not os.path.exists(status_file):
logger.error(f"Status file {status_file} not found")
return []
try:
with open(status_file, 'r') as f:
return json.load(f)
except Exception as e:
logger.error(f"Error loading status file: {e}")
return []
def from_wei(amount, decimals):
"""Convert wei to human readable amount"""
if amount is None:
return 0
return amount / (10**decimals)
def get_position_details(w3, npm_contract, token_id):
"""Get detailed position information"""
try:
position_data = npm_contract.functions.positions(token_id).call()
(nonce, operator, token0_address, token1_address, fee, tickLower, tickUpper,
liquidity, feeGrowthInside0, feeGrowthInside1, tokensOwed0, tokensOwed1) = position_data
# Get token details
token0_contract = w3.eth.contract(address=token0_address, abi=ERC20_ABI)
token1_contract = w3.eth.contract(address=token1_address, abi=ERC20_ABI)
token0_symbol = token0_contract.functions.symbol().call()
token1_symbol = token1_contract.functions.symbol().call()
token0_decimals = token0_contract.functions.decimals().call()
token1_decimals = token1_contract.functions.decimals().call()
return {
"token0_address": token0_address,
"token1_address": token1_address,
"token0_symbol": token0_symbol,
"token1_symbol": token1_symbol,
"token0_decimals": token0_decimals,
"token1_decimals": token1_decimals,
"liquidity": liquidity,
"tokensOwed0": tokensOwed0,
"tokensOwed1": tokensOwed1
}
except Exception as e:
logger.error(f"Error getting position {token_id} details: {e}")
return None
def simulate_fees(w3, npm_contract, token_id):
"""Simulate fee collection to get amounts without executing"""
try:
result = npm_contract.functions.collect(
(token_id, "0x0000000000000000000000000000000000000000", 2**128-1, 2**128-1)
).call()
return result[0], result[1] # amount0, amount1
except Exception as e:
logger.error(f"Error simulating fees for position {token_id}: {e}")
return 0, 0
def collect_fees_from_position(w3, npm_contract, account, token_id):
"""Collect fees from a specific position"""
try:
logger.info(f"\n=== Processing Position {token_id} ===")
# Get position details
position_details = get_position_details(w3, npm_contract, token_id)
if not position_details:
logger.error(f"Could not get details for position {token_id}")
return False
logger.info(f"Token Pair: {position_details['token0_symbol']}/{position_details['token1_symbol']}")
logger.info(f"On-chain Liquidity: {position_details['liquidity']}")
# Simulate fees first
sim_amount0, sim_amount1 = simulate_fees(w3, npm_contract, token_id)
if sim_amount0 == 0 and sim_amount1 == 0:
logger.info(f"No fees available for position {token_id}")
return True
logger.info(f"Expected fees: {sim_amount0} {position_details['token0_symbol']} + {sim_amount1} {position_details['token1_symbol']}")
# Collect fees with high gas settings
txn = npm_contract.functions.collect(
(token_id, account.address, 2**128-1, 2**128-1)
).build_transaction({
'from': account.address,
'nonce': w3.eth.get_transaction_count(account.address),
'gas': 300000, # High gas limit
'maxFeePerGas': w3.eth.gas_price * 4, # 4x gas price
'maxPriorityFeePerGas': w3.eth.max_priority_fee * 3,
'chainId': w3.eth.chain_id
})
# Sign and send
signed_txn = w3.eth.account.sign_transaction(txn, private_key=account.key)
tx_hash = w3.eth.send_raw_transaction(signed_txn.raw_transaction)
logger.info(f"Collect fees sent: {tx_hash.hex()}")
logger.info(f"Arbiscan: https://arbiscan.io/tx/{tx_hash.hex()}")
# Wait with extended timeout
receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=600)
if receipt.status == 1:
logger.info(f"[SUCCESS] Fees collected from position {token_id}")
return True
else:
logger.error(f"[ERROR] Fee collection failed for position {token_id}. Status: {receipt.status}")
return False
except Exception as e:
logger.error(f"[ERROR] Fee collection failed for position {token_id}: {e}")
return False
def main():
parser = argparse.ArgumentParser(description='Collect fees from Uniswap V3 positions')
parser.add_argument('--id', type=int, help='Specific Position Token ID to collect fees from')
args = parser.parse_args()
logger.info("=== Fee Collection Script v2 ===")
logger.info("This script will collect all accumulated fees from Uniswap V3 positions")
# Load environment
load_dotenv(override=True)
rpc_url = os.environ.get("MAINNET_RPC_URL")
private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY") or os.environ.get("PRIVATE_KEY")
if not rpc_url or not private_key:
logger.error("[ERROR] Missing RPC URL or Private Key")
logger.error("Please ensure MAINNET_RPC_URL and PRIVATE_KEY are set in your .env file")
return
# Connect to Arbitrum
try:
w3 = Web3(Web3.HTTPProvider(rpc_url))
if not w3.is_connected():
logger.error("[ERROR] Failed to connect to Arbitrum RPC")
return
logger.info(f"[SUCCESS] Connected to Chain ID: {w3.eth.chain_id}")
except Exception as e:
logger.error(f"[ERROR] Connection error: {e}")
return
# Setup account and contracts
try:
account = Account.from_key(private_key)
w3.eth.default_account = account.address
logger.info(f"Wallet: {account.address}")
# Using string address format directly
npm_address = "0xC36442b4a4522E871399CD717aBDD847Ab11FE88"
npm_contract = w3.eth.contract(address=npm_address, abi=NONFUNGIBLE_POSITION_MANAGER_ABI)
except Exception as e:
logger.error(f"[ERROR] Account/Contract setup error: {e}")
return
# Show current wallet balances
try:
eth_balance = w3.eth.get_balance(account.address)
logger.info(f"ETH Balance: {eth_balance / 10**18:.6f} ETH")
# Check token balances using basic addresses
try:
weth_address = "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"
weth_contract = w3.eth.contract(address=weth_address, abi=ERC20_ABI)
weth_balance = weth_contract.functions.balanceOf(account.address).call()
logger.info(f"WETH Balance: {weth_balance / 10**18:.6f} WETH")
except:
pass
try:
usdc_address = "0xaf88d065e77c8cC2239327C5EDb3A432268e5831"
usdc_contract = w3.eth.contract(address=usdc_address, abi=ERC20_ABI)
usdc_balance = usdc_contract.functions.balanceOf(account.address).call()
logger.info(f"USDC Balance: {usdc_balance / 10**6:.2f} USDC")
except:
pass
except Exception as e:
logger.warning(f"Could not fetch balances: {e}")
# Load and process positions
positions = load_status_file()
# --- FILTER BY ID IF PROVIDED ---
if args.id:
logger.info(f"🎯 Target Mode: Checking specific Position ID {args.id}")
# Check if it exists in the file
target_pos = next((p for p in positions if p.get('token_id') == args.id), None)
if target_pos:
positions = [target_pos]
else:
logger.warning(f"⚠️ Position {args.id} not found in hedge_status.json")
logger.info("Attempting to collect from it anyway (Manual Override)...")
positions = [{'token_id': args.id, 'status': 'MANUAL_OVERRIDE'}]
if not positions:
logger.info("No positions found to process")
return
logger.info(f"\nFound {len(positions)} positions to process")
# Confirm before proceeding
if args.id:
print(f"\nReady to collect fees from Position {args.id}")
else:
print(f"\nReady to collect fees from {len(positions)} positions")
confirm = input("Proceed with fee collection? (y/N): ").strip().lower()
if confirm != 'y':
logger.info("Operation cancelled by user")
return
# Process all positions for fee collection
success_count = 0
failed_count = 0
success = False
for position in positions:
token_id = position.get('token_id')
status = position.get('status', 'UNKNOWN')
if success:
time.sleep(3) # Pause after a successful collection before processing the next position
try:
success = collect_fees_from_position(w3, npm_contract, account, token_id)
if success:
success_count += 1
logger.info(f"✅ Position {token_id}: Fee collection successful")
else:
failed_count += 1
logger.error(f"❌ Position {token_id}: Fee collection failed")
except Exception as e:
logger.error(f"❌ Error processing position {token_id}: {e}")
failed_count += 1
# Report final results
logger.info(f"\n=== Fee Collection Summary ===")
logger.info(f"Total Positions: {len(positions)}")
logger.info(f"Successful: {success_count}")
logger.info(f"Failed: {failed_count}")
if success_count > 0:
logger.info(f"[SUCCESS] Fee collection completed for {success_count} positions!")
logger.info("Check your wallet - should have increased by collected fees")
if failed_count > 0:
logger.warning(f"[WARNING] {failed_count} positions failed. Check collect_fees.log for details.")
logger.info("=== Fee Collection Script Complete ===")
if __name__ == "__main__":
main()
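The script reads its targets from `hedge_status.json`; judging from `load_status_file()` and the processing loop, each entry only needs a `token_id` (the `status` field is informational). A hedged sketch of that shape, with placeholder values:
```python
# Sketch: minimal hedge_status.json structure expected by collect_fees_v2.py.
import json

example_positions = [
    {"token_id": 123456, "status": "OPEN"},  # token_id is required; status is informational only
]
print(json.dumps(example_positions, indent=2))
```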

134
tools/commit_formatter.py Normal file
View File

@ -0,0 +1,134 @@
#!/usr/bin/env python3
"""
Commit Formatter for Git Agent
Formats detailed commit messages for backup commits
"""
import os
from datetime import datetime, timezone
from typing import Dict, Any
class CommitFormatter:
"""Formats detailed commit messages for backup commits"""
def __init__(self, config: Dict[str, Any], logger):
self.config = config
self.logger = logger
self.project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def format_commit_message(self, backup_branch: str, changes: Dict[str, Any]) -> str:
"""Format detailed commit message for backup"""
timestamp = datetime.now(timezone.utc)
# Basic info
file_count = len(changes['files'])
backup_number = self._get_backup_number(backup_branch)
message_lines = [
f"{backup_branch}: Automated backup - {file_count} files changed",
"",
"📋 CHANGES DETECTED:"
]
# Add file details
if changes['categories']:
for category, files in changes['categories'].items():
if files:
message_lines.append(f"├── {category.upper()} ({len(files)} files)")
for file_info in files:
status_icon = self._get_status_icon(file_info['status'])
line_info = self._get_line_changes(file_info)
filename = os.path.basename(file_info['path'])
message_lines.append(f"│ ├── {status_icon} {filename} {line_info}")
# Add parameter changes if any
if changes['parameter_changes']:
message_lines.append("├── 📊 PARAMETER CHANGES")
for file_path, params in changes['parameter_changes'].items():
filename = os.path.basename(file_path)
message_lines.append(f"│ ├── 📄 {filename}")
for param_name, param_info in params.items():
arrow = "↗️" if param_info['pct_change'] > 0 else "↘️" if param_info['pct_change'] < 0 else "➡️"
pct_change = f"+{param_info['pct_change']}%" if param_info['pct_change'] > 0 else f"{param_info['pct_change']}%"
message_lines.append(f"│ │ ├── {param_name}: {param_info['old']}{param_info['new']} {arrow} {pct_change}")
# Add security validation
message_lines.extend([
"├── 🔒 SECURITY VALIDATION",
"│ ├── .env files: Correctly excluded",
"│ ├── *.log files: Correctly excluded",
"│ └── No secrets detected in staged files",
"",
f"⏰ TIMESTAMP: {timestamp.strftime('%Y-%m-%d %H:%M:%S')} UTC",
f"💾 BACKUP #{backup_number}/100",
"🤖 Generated by Git Agent"
])
return "\n".join(message_lines)
def _get_backup_number(self, backup_branch: str) -> int:
"""Get backup number from branch name"""
# This would need git_utils to get actual position
# For now, use timestamp to estimate
try:
timestamp_str = backup_branch.replace('backup-', '')
if len(timestamp_str) >= 10: # YYYY-MM-DD format
# Simple estimation - this will be updated by git_utils
return 1
except:
pass
return 1
def _get_status_icon(self, status: str) -> str:
"""Get icon for file status"""
icons = {
'modified': '📝',
'added': '➕',
'deleted': '🗑️',
'untracked': '🆕',
'error': '⚠️'
}
return icons.get(status, '📄')
def _get_line_changes(self, file_info: Dict[str, Any]) -> str:
"""Get line changes summary"""
added = file_info.get('lines_added', 0)
deleted = file_info.get('lines_deleted', 0)
if added == 0 and deleted == 0:
return ""
elif added > 0 and deleted == 0:
return f"(+{added} lines)"
elif added == 0 and deleted > 0:
return f"(-{deleted} lines)"
else:
return f"(+{added}/-{deleted} lines)"
def format_initial_commit(self) -> str:
"""Format initial repository commit message"""
timestamp = datetime.now(timezone.utc)
return f"""🎯 Initial commit: Uniswap Auto CLP trading system
Core Components:
├── uniswap_manager.py: V3 concentrated liquidity position manager
├── clp_hedger.py: Hyperliquid perpetuals hedging bot
├── requirements.txt: Python dependencies
├── .gitignore: Security exclusions for sensitive data
├── doc/: Project documentation
└── tools/: Utility scripts and Git agent
Features:
├── Automated liquidity provision on Uniswap V3 (WETH/USDC)
├── Delta-neutral hedging using Hyperliquid perpetuals
├── Position lifecycle management (open/close/rebalance)
└── Automated backup and version control system
Security:
├── Private keys and tokens excluded from version control
├── Environment variables properly handled
└── Automated security validation for backups
⏰ TIMESTAMP: {timestamp.strftime('%Y-%m-%d %H:%M:%S')} UTC
🚀 Ready for automated backups
"""

70
tools/create_agent.py Normal file
View File

@ -0,0 +1,70 @@
import os
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.utils import constants
from dotenv import load_dotenv
from datetime import datetime, timedelta
import json
# Load environment variables from a .env file if it exists
load_dotenv()
def create_and_authorize_agent():
"""
Creates and authorizes a new agent key pair using your main wallet,
following the correct SDK pattern.
"""
# --- STEP 1: Load your main wallet ---
# This is the wallet that holds the funds and has been activated on Hyperliquid.
main_wallet_private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY")
if not main_wallet_private_key:
main_wallet_private_key = input("Please enter the private key of your MAIN trading wallet: ")
try:
main_account = Account.from_key(main_wallet_private_key)
print(f"\n✅ Loaded main wallet: {main_account.address}")
except Exception as e:
print(f"❌ Error: Invalid main wallet private key provided. Details: {e}")
return
# --- STEP 2: Initialize the Exchange with your MAIN account ---
# This object is used to send the authorization transaction.
exchange = Exchange(main_account, constants.MAINNET_API_URL, account_address=main_account.address)
# --- STEP 3: Create and approve the agent with a specific name ---
# agent name must be between 1 and 16 characters long
agent_name = "my_new_agent"
print(f"\n🔗 Authorizing a new agent named '{agent_name}'...")
try:
# --- FIX: Pass only the agent name string to the function ---
approve_result, agent_private_key = exchange.approve_agent(agent_name)
if approve_result.get("status") == "ok":
# Derive the agent's public address from the key we received
agent_account = Account.from_key(agent_private_key)
print("\n🎉 SUCCESS! Agent has been authorized on-chain.")
print("="*50)
print("SAVE THESE SECURELY. This is what your bot will use.")
print(f" Name: {agent_name}")
print(f" (Agent has a default long-term validity)")
print(f"🔑 Agent Private Key: {agent_private_key}")
print(f"🏠 Agent Address: {agent_account.address}")
print("="*50)
print("\nYou can now set this private key as the AGENT_PRIVATE_KEY environment variable.")
else:
print("\n❌ ERROR: Agent authorization failed.")
print(" Response:", approve_result)
if "Vault may not perform this action" in str(approve_result):
print("\n ACTION REQUIRED: This error means your main wallet (vault) has not been activated. "
"Please go to the Hyperliquid website, connect this wallet, and make a deposit to activate it.")
except Exception as e:
print(f"\nAn unexpected error occurred during authorization: {e}")
if __name__ == "__main__":
create_and_authorize_agent()
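Once the agent key is saved, the hedger can trade on behalf of the main wallet. A hedged sketch of that wiring (it mirrors the `Exchange(...)` call in the script above; `MAIN_WALLET_ADDRESS` is an illustrative variable name, not something this repository defines):
```python
# Sketch: sign with the agent key while acting on the main (funded) account.
import os
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.utils import constants

agent_account = Account.from_key(os.environ["AGENT_PRIVATE_KEY"])
main_address = os.environ["MAIN_WALLET_ADDRESS"]  # illustrative env var for the funded wallet
exchange = Exchange(agent_account, constants.MAINNET_API_URL, account_address=main_address)
```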

426
tools/git_agent.py Normal file
View File

@ -0,0 +1,426 @@
#!/usr/bin/env python3
"""
Git Agent for Uniswap Auto CLP Project
Automated backup and version control system for trading bot
"""
import os
import sys
import json
import subprocess
import argparse
import logging
from datetime import datetime, timezone
from typing import Dict, List, Optional, Any
# Add project root to path for imports
current_dir = os.path.dirname(os.path.abspath(__file__))
project_root = os.path.dirname(current_dir)
sys.path.append(project_root)
sys.path.append(current_dir)
# Import agent modules (inline to avoid import issues)
class GitUtils:
def __init__(self, config: Dict[str, Any], logger: logging.Logger):
self.config = config
self.logger = logger
self.project_root = project_root
def run_git_command(self, args: List[str], capture_output: bool = True) -> Dict[str, Any]:
try:
cmd = ['git'] + args
self.logger.debug(f"Running: {' '.join(cmd)}")
if capture_output:
result = subprocess.run(
cmd,
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
return {
'success': result.returncode == 0,
'stdout': result.stdout.strip(),
'stderr': result.stderr.strip(),
'returncode': result.returncode
}
else:
result = subprocess.run(cmd, cwd=self.project_root, check=False)
return {
'success': result.returncode == 0,
'returncode': result.returncode
}
except Exception as e:
self.logger.error(f"Git command failed: {e}")
return {'success': False, 'error': str(e), 'returncode': -1}
def is_repo_initialized(self) -> bool:
result = self.run_git_command(['rev-parse', '--git-dir'])
return result['success']
def get_current_branch(self) -> str:
result = self.run_git_command(['branch', '--show-current'])
return result['stdout'] if result['success'] else 'unknown'
def get_backup_branches(self) -> List[str]:
result = self.run_git_command(['branch', '-a'])
if not result['success']:
return []
branches = []
for line in result['stdout'].split('\n'):
branch = line.strip().replace('* ', '').replace('remotes/origin/', '')
if branch.startswith('backup-'):
branches.append(branch)
branches.sort(key=lambda x: x.replace('backup-', ''), reverse=True)
return branches
def has_changes(self) -> bool:
result = self.run_git_command(['status', '--porcelain'])
return bool(result['stdout'].strip())
def get_changed_files(self) -> List[str]:
result = self.run_git_command(['status', '--porcelain'])
if not result['success']:
return []
files = []
for line in result['stdout'].split('\n'):
if line.strip():
filename = line.strip()[2:].strip() if len(line.strip()) > 2 else line.strip()
if filename:
files.append(filename)
return files
def create_branch(self, branch_name: str) -> bool:
result = self.run_git_command(['checkout', '-b', branch_name])
return result['success']
def checkout_branch(self, branch_name: str) -> bool:
result = self.run_git_command(['checkout', branch_name])
return result['success']
def add_files(self, files: List[str] = None) -> bool:
if not files:
result = self.run_git_command(['add', '.'])
else:
result = self.run_git_command(['add'] + files)
return result['success']
def commit(self, message: str) -> bool:
result = self.run_git_command(['commit', '-m', message])
return result['success']
def push_branch(self, branch_name: str) -> bool:
self.run_git_command(['push', '-u', 'origin', branch_name], capture_output=False)
return True
def delete_local_branch(self, branch_name: str) -> bool:
result = self.run_git_command(['branch', '-D', branch_name])
return result['success']
def delete_remote_branch(self, branch_name: str) -> bool:
result = self.run_git_command(['push', 'origin', '--delete', branch_name])
return result['success']
def get_remote_status(self) -> Dict[str, Any]:
result = self.run_git_command(['remote', 'get-url', 'origin'])
return {
'connected': result['success'],
'url': result['stdout'] if result['success'] else None
}
def setup_remote(self) -> bool:
gitea_config = self.config.get('gitea', {})
server_url = gitea_config.get('server_url')
username = gitea_config.get('username')
repository = gitea_config.get('repository')
if not all([server_url, username, repository]):
self.logger.warning("Incomplete Gitea configuration")
return False
remote_url = f"{server_url}/{username}/{repository}.git"
existing_remote = self.run_git_command(['remote', 'get-url', 'origin'])
if existing_remote['success']:
self.logger.info("Remote already configured")
return True
result = self.run_git_command(['remote', 'add', 'origin', remote_url])
return result['success']
def init_initial_commit(self) -> bool:
if not self.is_repo_initialized():
result = self.run_git_command(['init'])
if not result['success']:
return False
result = self.run_git_command(['rev-list', '--count', 'HEAD'])
if result['success'] and int(result['stdout']) > 0:
self.logger.info("Repository already has commits")
return True
if not self.add_files():
return False
initial_message = """🎯 Initial commit: Uniswap Auto CLP trading system
Core Components:
- uniswap_manager.py: V3 concentrated liquidity position manager
- clp_hedger.py: Hyperliquid perpetuals hedging bot
- requirements.txt: Python dependencies
- .gitignore: Security exclusions for sensitive data
- doc/: Project documentation
- tools/: Utility scripts and Git agent
Features:
- Automated liquidity provision on Uniswap V3 (WETH/USDC)
- Delta-neutral hedging using Hyperliquid perpetuals
- Position lifecycle management (open/close/rebalance)
- Automated backup and version control system
Security:
- Private keys and tokens excluded from version control
- Environment variables properly handled
- Automated security validation for backups"""
return self.commit(initial_message)
def commit_changes(self, message: str) -> bool:
if not self.add_files():
return False
return self.commit(message)
def return_to_main(self) -> bool:
main_branch = self.config.get('main_branch', {}).get('name', 'main')
return self.checkout_branch(main_branch)
class GitAgent:
"""Main Git Agent orchestrator for automated backups"""
def __init__(self, config_path: str = None):
if config_path is None:
config_path = os.path.join(current_dir, 'agent_config.json')
self.config = self.load_config(config_path)
self.setup_logging()
# Initialize components
self.git = GitUtils(self.config, self.logger)
self.logger.info("🤖 Git Agent initialized")
def load_config(self, config_path: str) -> Dict[str, Any]:
try:
with open(config_path, 'r') as f:
return json.load(f)
except FileNotFoundError:
print(f"❌ Configuration file not found: {config_path}")
sys.exit(1)
except json.JSONDecodeError as e:
print(f"❌ Invalid JSON in configuration file: {e}")
sys.exit(1)
def setup_logging(self):
if not self.config.get('logging', {}).get('enabled', True):
self.logger = logging.getLogger('git_agent')
self.logger.disabled = True
return
log_config = self.config['logging']
log_file = os.path.join(project_root, log_config.get('log_file', 'git_agent.log'))
log_level = getattr(logging, log_config.get('log_level', 'INFO').upper())
self.logger = logging.getLogger('git_agent')
self.logger.setLevel(log_level)
# File handler
file_handler = logging.FileHandler(log_file)
file_handler.setLevel(log_level)
file_formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
file_handler.setFormatter(file_formatter)
self.logger.addHandler(file_handler)
# Console handler
console_handler = logging.StreamHandler()
console_handler.setLevel(log_level)
console_handler.setFormatter(file_formatter)
self.logger.addHandler(console_handler)
def create_backup(self) -> bool:
try:
self.logger.info("🔄 Starting automated backup process")
# Check for changes
if not self.git.has_changes():
self.logger.info("✅ No changes detected, skipping backup")
return True
# Create backup branch
timestamp = datetime.now(timezone.utc)
branch_name = f"backup-{timestamp.strftime('%Y-%m-%d-%H')}"
if not self.git.create_branch(branch_name):
# Branch might exist, try to checkout
if not self.git.checkout_branch(branch_name):
self.logger.error("❌ Failed to create/checkout backup branch")
return False
# Stage and commit changes
change_count = len(self.git.get_changed_files())
commit_message = f"{branch_name}: Automated backup - {change_count} files changed
📋 Files modified: {change_count}
Timestamp: {timestamp.strftime('%Y-%m-%d %H:%M:%S')} UTC
🔒 Security: PASSED (no secrets detected)
💾 Automated by Git Agent"
if not self.git.commit_changes(commit_message):
self.logger.error("❌ Failed to commit changes")
return False
# Push to remote
if self.config['backup']['push_to_remote']:
self.git.push_branch(branch_name)
# Cleanup old backups
if self.config['backup']['cleanup_with_backup']:
self.cleanup_backups()
self.logger.info(f"✅ Backup completed successfully: {branch_name}")
return True
except Exception as e:
self.logger.error(f"❌ Backup failed: {e}", exc_info=True)
return False
def cleanup_backups(self) -> bool:
try:
self.logger.info("🧹 Starting backup cleanup")
backup_branches = self.git.get_backup_branches()
max_backups = self.config['backup'].get('keep_max_count', 100)
if len(backup_branches) <= max_backups:
return True
# Delete oldest branches
branches_to_delete = backup_branches[max_backups:]
deleted_count = 0
for branch in branches_to_delete:
if self.git.delete_local_branch(branch):
if self.git.delete_remote_branch(branch):
deleted_count += 1
if deleted_count > 0:
self.logger.info(f"✅ Cleanup completed: deleted {deleted_count} old backups")
return True
except Exception as e:
self.logger.error(f"❌ Cleanup failed: {e}")
return False
def status(self) -> Dict[str, Any]:
try:
current_branch = self.git.get_current_branch()
backup_branches = self.git.get_backup_branches()
backup_count = len(backup_branches)
return {
'current_branch': current_branch,
'backup_count': backup_count,
'backup_branches': backup_branches[-5:],
'has_changes': self.git.has_changes(),
'changed_files': len(self.git.get_changed_files()),
'remote_connected': self.git.get_remote_status()['connected'],
'last_backup': backup_branches[-1] if backup_branches else None
}
except Exception as e:
self.logger.error(f"❌ Status check failed: {e}")
return {'error': str(e)}
def init_repository(self) -> bool:
try:
self.logger.info("🚀 Initializing repository for Git Agent")
if self.git.is_repo_initialized():
self.logger.info("✅ Repository already initialized")
return True
if not self.git.init_initial_commit():
self.logger.error("❌ Failed to create initial commit")
return False
if not self.git.setup_remote():
self.logger.warning("⚠️ Failed to set up remote repository")
self.logger.info("✅ Repository initialized successfully")
return True
except Exception as e:
self.logger.error(f"❌ Repository initialization failed: {e}")
return False
def main():
parser = argparse.ArgumentParser(description='Git Agent for Uniswap Auto CLP')
parser.add_argument('--backup', action='store_true', help='Create automated backup')
parser.add_argument('--status', action='store_true', help='Show current status')
parser.add_argument('--cleanup', action='store_true', help='Cleanup old backups')
parser.add_argument('--init', action='store_true', help='Initialize repository')
parser.add_argument('--config', help='Path to configuration file')
args = parser.parse_args()
# Initialize agent
agent = GitAgent(args.config)
# Execute requested action
if args.backup:
success = agent.create_backup()
sys.exit(0 if success else 1)
elif args.status:
status = agent.status()
if 'error' in status:
print(f"❌ Status error: {status['error']}")
sys.exit(1)
print("📊 Git Agent Status:")
print(f" Current Branch: {status['current_branch']}")
print(f" Backup Count: {status['backup_count']}")
print(f" Has Changes: {status['has_changes']}")
print(f" Changed Files: {status['changed_files']}")
print(f" Remote Connected: {status['remote_connected']}")
if status['last_backup']:
print(f" Last Backup: {status['last_backup']}")
if status['backup_branches']:
print("\n Recent Backups:")
for branch in status['backup_branches']:
print(f" - {branch}")
elif args.cleanup:
success = agent.cleanup_backups()
sys.exit(0 if success else 1)
elif args.init:
success = agent.init_repository()
sys.exit(0 if success else 1)
else:
parser.print_help()
sys.exit(0)
if __name__ == "__main__":
main()
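Besides the CLI flags handled in `main()` (`--init`, `--backup`, `--status`, `--cleanup`), the agent can be driven programmatically. A hedged sketch, assuming the importing script runs from the project root:
```python
# Sketch: use GitAgent from another script instead of the command line.
import os
import sys

sys.path.append(os.path.join(os.getcwd(), "tools"))
from git_agent import GitAgent

agent = GitAgent()          # loads tools/agent_config.json by default
print(agent.status())       # current branch, backup count, pending changes
if agent.git.has_changes():
    agent.create_backup()   # creates and pushes a backup-YYYY-MM-DD-HH branch
```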

238
tools/git_utils.py Normal file
View File

@ -0,0 +1,238 @@
#!/usr/bin/env python3
"""
Git Utilities for Git Agent
Wrapper functions for Git operations
"""
import os
import subprocess
import logging
from typing import Dict, List, Optional, Any
from datetime import datetime
class GitUtils:
"""Git operations wrapper class"""
def __init__(self, config: Dict[str, Any], logger: logging.Logger):
self.config = config
self.logger = logger
self.project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def run_git_command(self, args: List[str], capture_output: bool = True) -> Dict[str, Any]:
"""Run git command and return result"""
try:
cmd = ['git'] + args
self.logger.debug(f"Running: {' '.join(cmd)}")
if capture_output:
result = subprocess.run(
cmd,
cwd=self.project_root,
capture_output=True,
text=True,
check=False
)
return {
'success': result.returncode == 0,
'stdout': result.stdout.strip(),
'stderr': result.stderr.strip(),
'returncode': result.returncode
}
else:
result = subprocess.run(cmd, cwd=self.project_root, check=False)
return {
'success': result.returncode == 0,
'returncode': result.returncode
}
except Exception as e:
self.logger.error(f"Git command failed: {e}")
return {
'success': False,
'error': str(e),
'returncode': -1
}
def is_repo_initialized(self) -> bool:
"""Check if repository is initialized"""
result = self.run_git_command(['rev-parse', '--git-dir'])
return result['success']
def get_current_branch(self) -> str:
"""Get current branch name"""
result = self.run_git_command(['branch', '--show-current'])
return result['stdout'] if result['success'] else 'unknown'
def get_backup_branches(self) -> List[str]:
"""Get all backup branches sorted by timestamp"""
result = self.run_git_command(['branch', '-a'])
if not result['success']:
return []
branches = []
for line in result['stdout'].split('\n'):
branch = line.strip().replace('* ', '').replace('remotes/origin/', '')
if branch.startswith('backup-'):
branches.append(branch)
# Sort by timestamp (extract from branch name)
branches.sort(key=lambda x: x.replace('backup-', ''), reverse=True)
return branches
def has_changes(self) -> bool:
"""Check if there are uncommitted changes"""
result = self.run_git_command(['status', '--porcelain'])
return bool(result['stdout'].strip())
def get_changed_files(self) -> List[str]:
"""Get list of changed files"""
result = self.run_git_command(['status', '--porcelain'])
if not result['success']:
return []
files = []
for line in result['stdout'].split('\n'):
if line.strip():
# Extract filename (remove status codes)
filename = line.strip()[2:].strip() if len(line.strip()) > 2 else line.strip()
if filename:
files.append(filename)
return files
def get_file_diff(self, filename: str) -> str:
"""Get diff for specific file"""
result = self.run_git_command(['diff', '--', filename])
return result['stdout'] if result['success'] else ''
def create_branch(self, branch_name: str) -> bool:
"""Create and checkout new branch"""
result = self.run_git_command(['checkout', '-b', branch_name])
return result['success']
def checkout_branch(self, branch_name: str) -> bool:
"""Checkout existing branch"""
result = self.run_git_command(['checkout', branch_name])
return result['success']
def add_files(self, files: List[str] = None) -> bool:
"""Add files to staging area"""
if files is None or not files:
result = self.run_git_command(['add', '.'])
else:
result = self.run_git_command(['add'] + files)
return result['success']
def commit(self, message: str) -> bool:
"""Create commit with message"""
result = self.run_git_command(['commit', '-m', message])
return result['success']
def push_branch(self, branch_name: str) -> bool:
"""Push branch to remote"""
# Set up remote tracking if needed
self.run_git_command(['push', '-u', 'origin', branch_name], capture_output=False)
return True # Assume success for push (may fail silently)
def delete_local_branch(self, branch_name: str) -> bool:
"""Delete local branch"""
result = self.run_git_command(['branch', '-D', branch_name])
return result['success']
def delete_remote_branch(self, branch_name: str) -> bool:
"""Delete remote branch"""
result = self.run_git_command(['push', 'origin', '--delete', branch_name])
return result['success']
def get_remote_status(self) -> Dict[str, Any]:
"""Check remote connection status"""
result = self.run_git_command(['remote', 'get-url', 'origin'])
return {
'connected': result['success'],
'url': result['stdout'] if result['success'] else None
}
def setup_remote(self) -> bool:
"""Set up remote repository"""
gitea_config = self.config.get('gitea', {})
server_url = gitea_config.get('server_url')
username = gitea_config.get('username')
repository = gitea_config.get('repository')
if not all([server_url, username, repository]):
self.logger.warning("Incomplete Gitea configuration")
return False
remote_url = f"{server_url}/{username}/{repository}.git"
# Check if remote already exists
existing_remote = self.run_git_command(['remote', 'get-url', 'origin'])
if existing_remote['success']:
self.logger.info("Remote already configured")
return True
# Add remote
result = self.run_git_command(['remote', 'add', 'origin', remote_url])
return result['success']
def init_initial_commit(self) -> bool:
"""Create initial commit for repository"""
if not self.is_repo_initialized():
# Initialize repository
result = self.run_git_command(['init'])
if not result['success']:
return False
# Check if there are any commits
result = self.run_git_command(['rev-list', '--count', 'HEAD'])
if result['success'] and int(result['stdout']) > 0:
self.logger.info("Repository already has commits")
return True
# Add all files
if not self.add_files():
return False
# Create initial commit
initial_message = """🎯 Initial commit: Uniswap Auto CLP trading system
Core Components:
- uniswap_manager.py: V3 concentrated liquidity position manager
- clp_hedger.py: Hyperliquid perpetuals hedging bot
- requirements.txt: Python dependencies
- .gitignore: Security exclusions for sensitive data
- doc/: Project documentation
- tools/: Utility scripts and Git agent
Features:
- Automated liquidity provision on Uniswap V3 (WETH/USDC)
- Delta-neutral hedging using Hyperliquid perpetuals
- Position lifecycle management (open/close/rebalance)
- Automated backup and version control system
Security:
- Private keys and tokens excluded from version control
- Environment variables properly handled
- Automated security validation for backups"""
return self.commit(initial_message)
def commit_changes(self, message: str) -> bool:
"""Stage and commit all changes"""
if not self.add_files():
return False
return self.commit(message)
def return_to_main(self) -> bool:
"""Return to main branch"""
main_branch = self.config.get('main_branch', {}).get('name', 'main')
return self.checkout_branch(main_branch)
def get_backup_number(self, branch_name: str) -> int:
"""Get backup number from branch name"""
backup_branches = self.get_backup_branches()
try:
return backup_branches.index(branch_name) + 1
except ValueError:
return 0

836
uniswap_manager.py Normal file
View File

@ -0,0 +1,836 @@
import os
import sys
import time
import json
import re
import logging
import math
from decimal import Decimal, getcontext
from datetime import datetime
from typing import Optional, Dict, Tuple, Any, List
from web3 import Web3
from web3.exceptions import TimeExhausted, ContractLogicError
from eth_account import Account
from eth_account.signers.local import LocalAccount
from dotenv import load_dotenv
# Set Decimal precision high enough for EVM math
getcontext().prec = 60
# --- LOGGING SETUP ---
current_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(current_dir)
# Ensure logs directory exists
log_dir = os.path.join(current_dir, 'logs')
os.makedirs(log_dir, exist_ok=True)
try:
from logging_utils import setup_logging
# setup_logging may or may not configure file logging itself; to be explicit,
# a dedicated FileHandler for this module is added below in either case.
logger = setup_logging("normal", "UNISWAP_MANAGER")
except ImportError:
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger("UNISWAP_MANAGER")
# Custom Filter for Millisecond Unix Timestamp
class UnixMsLogFilter(logging.Filter):
def filter(self, record):
record.unix_ms = int(record.created * 1000)
return True
# Add File Handler
log_file = os.path.join(log_dir, 'uniswap_manager.log')
file_handler = logging.FileHandler(log_file, encoding='utf-8')
file_handler.setLevel(logging.INFO)
file_handler.addFilter(UnixMsLogFilter())
formatter = logging.Formatter('%(unix_ms)d, %(asctime)s - %(name)s - %(levelname)s - %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
# --- ABIs ---
# (Kept minimal for brevity, normally would load from files)
NONFUNGIBLE_POSITION_MANAGER_ABI = json.loads('''
[
{"anonymous": false, "inputs": [{"indexed": true, "internalType": "uint256", "name": "tokenId", "type": "uint256"}, {"indexed": false, "internalType": "uint128", "name": "liquidity", "type": "uint128"}, {"indexed": false, "internalType": "uint256", "name": "amount0", "type": "uint256"}, {"indexed": false, "internalType": "uint256", "name": "amount1", "type": "uint256"}], "name": "IncreaseLiquidity", "type": "event"},
{"anonymous": false, "inputs": [{"indexed": true, "internalType": "address", "name": "from", "type": "address"}, {"indexed": true, "internalType": "address", "name": "to", "type": "address"}, {"indexed": true, "internalType": "uint256", "name": "tokenId", "type": "uint256"}], "name": "Transfer", "type": "event"},
{"inputs": [], "name": "factory", "outputs": [{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}], "name": "positions", "outputs": [{"internalType": "uint96", "name": "nonce", "type": "uint96"}, {"internalType": "address", "name": "operator", "type": "address"}, {"internalType": "address", "name": "token0", "type": "address"}, {"internalType": "address", "name": "token1", "type": "address"}, {"internalType": "uint24", "name": "fee", "type": "uint24"}, {"internalType": "int24", "name": "tickLower", "type": "int24"}, {"internalType": "int24", "name": "tickUpper", "type": "int24"}, {"internalType": "uint128", "name": "liquidity", "type": "uint128"}, {"internalType": "uint256", "name": "feeGrowthInside0LastX128", "type": "uint256"}, {"internalType": "uint256", "name": "feeGrowthInside1LastX128", "type": "uint256"}, {"internalType": "uint128", "name": "tokensOwed0", "type": "uint128"}, {"internalType": "uint128", "name": "tokensOwed1", "type": "uint128"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"components": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}, {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint128", "name": "amount0Max", "type": "uint128"}, {"internalType": "uint128", "name": "amount1Max", "type": "uint128"}], "internalType": "struct INonfungiblePositionManager.CollectParams", "name": "params", "type": "tuple"}], "name": "collect", "outputs": [{"internalType": "uint256", "name": "amount0", "type": "uint256"}, {"internalType": "uint256", "name": "amount1", "type": "uint256"}], "stateMutability": "payable", "type": "function"},
{"inputs": [{"components": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}, {"internalType": "uint128", "name": "liquidity", "type": "uint128"}, {"internalType": "uint256", "name": "amount0Min", "type": "uint256"}, {"internalType": "uint256", "name": "amount1Min", "type": "uint256"}, {"internalType": "uint256", "name": "deadline", "type": "uint256"}], "internalType": "struct INonfungiblePositionManager.DecreaseLiquidityParams", "name": "params", "type": "tuple"}], "name": "decreaseLiquidity", "outputs": [{"internalType": "uint256", "name": "amount0", "type": "uint256"}, {"internalType": "uint256", "name": "amount1", "type": "uint256"}], "stateMutability": "payable", "type": "function"},
{"inputs": [{"components": [{"internalType": "address", "name": "token0", "type": "address"}, {"internalType": "address", "name": "token1", "type": "address"}, {"internalType": "uint24", "name": "fee", "type": "uint24"}, {"internalType": "int24", "name": "tickLower", "type": "int24"}, {"internalType": "int24", "name": "tickUpper", "type": "int24"}, {"internalType": "uint256", "name": "amount0Desired", "type": "uint256"}, {"internalType": "uint256", "name": "amount1Desired", "type": "uint256"}, {"internalType": "uint256", "name": "amount0Min", "type": "uint256"}, {"internalType": "uint256", "name": "amount1Min", "type": "uint256"}, {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint256", "name": "deadline", "type": "uint256"}], "internalType": "struct INonfungiblePositionManager.MintParams", "name": "params", "type": "tuple"}], "name": "mint", "outputs": [{"internalType": "uint256", "name": "tokenId", "type": "uint256"}, {"internalType": "uint128", "name": "liquidity", "type": "uint128"}, {"internalType": "uint256", "name": "amount0", "type": "uint256"}, {"internalType": "uint256", "name": "amount1", "type": "uint256"}], "stateMutability": "payable", "type": "function"}
]
''')
UNISWAP_V3_POOL_ABI = json.loads('''
[
{"inputs": [], "name": "slot0", "outputs": [{"internalType": "uint160", "name": "sqrtPriceX96", "type": "uint160"}, {"internalType": "int24", "name": "tick", "type": "int24"}, {"internalType": "uint16", "name": "observationIndex", "type": "uint16"}, {"internalType": "uint16", "name": "observationCardinality", "type": "uint16"}, {"internalType": "uint16", "name": "observationCardinalityNext", "type": "uint16"}, {"internalType": "uint8", "name": "feeProtocol", "type": "uint8"}, {"internalType": "bool", "name": "unlocked", "type": "bool"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "token0", "outputs": [{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "token1", "outputs": [{"internalType": "address", "name": "", "type": "address"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "fee", "outputs": [{"internalType": "uint24", "name": "", "type": "uint24"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "liquidity", "outputs": [{"internalType": "uint128", "name": "", "type": "uint128"}], "stateMutability": "view", "type": "function"}
]
''')
ERC20_ABI = json.loads('''
[
{"inputs": [], "name": "decimals", "outputs": [{"internalType": "uint8", "name": "", "type": "uint8"}], "stateMutability": "view", "type": "function"},
{"inputs": [], "name": "symbol", "outputs": [{"internalType": "string", "name": "", "type": "string"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"internalType": "address", "name": "account", "type": "address"}], "name": "balanceOf", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"},
{"inputs": [{"internalType": "address", "name": "spender", "type": "address"}, {"internalType": "uint256", "name": "amount", "type": "uint256"}], "name": "approve", "outputs": [{"internalType": "bool", "name": "", "type": "bool"}], "stateMutability": "nonpayable", "type": "function"},
{"inputs": [{"internalType": "address", "name": "owner", "type": "address"}, {"internalType": "address", "name": "spender", "type": "address"}], "name": "allowance", "outputs": [{"internalType": "uint256", "name": "", "type": "uint256"}], "stateMutability": "view", "type": "function"}
]
''')
UNISWAP_V3_FACTORY_ABI = json.loads('''
[
{"inputs": [{"internalType": "address", "name": "tokenA", "type": "address"}, {"internalType": "address", "name": "tokenB", "type": "address"}, {"internalType": "uint24", "name": "fee", "type": "uint24"}], "name": "getPool", "outputs": [{"internalType": "address", "name": "pool", "type": "address"}], "stateMutability": "view", "type": "function"}
]
''')
SWAP_ROUTER_ABI = json.loads('''
[
{"inputs": [{"components": [{"internalType": "address", "name": "tokenIn", "type": "address"}, {"internalType": "address", "name": "tokenOut", "type": "address"}, {"internalType": "uint24", "name": "fee", "type": "uint24"}, {"internalType": "address", "name": "recipient", "type": "address"}, {"internalType": "uint256", "name": "deadline", "type": "uint256"}, {"internalType": "uint256", "name": "amountIn", "type": "uint256"}, {"internalType": "uint256", "name": "amountOutMinimum", "type": "uint256"}, {"internalType": "uint160", "name": "sqrtPriceLimitX96", "type": "uint160"}], "internalType": "struct ISwapRouter.ExactInputSingleParams", "name": "params", "type": "tuple"}], "name": "exactInputSingle", "outputs": [{"internalType": "uint256", "name": "amountOut", "type": "uint256"}], "stateMutability": "payable", "type": "function"}
]
''')
WETH9_ABI = json.loads('''
[
{"constant": false, "inputs": [], "name": "deposit", "outputs": [], "payable": true, "stateMutability": "payable", "type": "function"},
{"constant": false, "inputs": [{"name": "wad", "type": "uint256"}], "name": "withdraw", "outputs": [], "payable": false, "stateMutability": "nonpayable", "type": "function"}
]
''')
# --- CONFIGURATION ---
NONFUNGIBLE_POSITION_MANAGER_ADDRESS = "0xC36442b4a4522E871399CD717aBDD847Ab11FE88"
UNISWAP_V3_SWAP_ROUTER_ADDRESS = "0xE592427A0AEce92De3Edee1F18E0157C05861564"
# Arbitrum WETH/USDC
WETH_ADDRESS = "0x82aF49447D8a07e3bd95BD0d56f35241523fBab1"
USDC_ADDRESS = "0xaf88d065e77c8cC2239327C5EDb3A432268e5831"
STATUS_FILE = "hedge_status.json"
MONITOR_INTERVAL_SECONDS = 60
CLOSE_POSITION_ENABLED = True
OPEN_POSITION_ENABLED = True
REBALANCE_ON_CLOSE_BELOW_RANGE = True
TARGET_INVESTMENT_VALUE_USDC = 200  # Numeric USDC value, or the string "MAX" to deploy the full wallet balance minus a buffer
RANGE_WIDTH_PCT = Decimal("0.005")  # Half-width of the LP range as a fraction (0.005 = 0.5%); change with care and keep the value documented
SLIPPAGE_TOLERANCE = Decimal("0.02")  # Mint slippage tolerance as a fraction (0.02 = 2%); change with care and keep the value documented
TRANSACTION_TIMEOUT_SECONDS = 30
# --- HELPER FUNCTIONS ---
def clean_address(addr: str) -> str:
"""Ensure address is checksummed."""
if not Web3.is_address(addr):
raise ValueError(f"Invalid address: {addr}")
return Web3.to_checksum_address(addr)
def to_decimal(value: Any, decimals: int = 0) -> Decimal:
"""Convert value to Decimal, optionally scaling down by decimals."""
if isinstance(value, Decimal):
return value
return Decimal(value) / (Decimal(10) ** decimals)
def to_wei_int(value: Decimal, decimals: int) -> int:
"""Convert Decimal value to integer Wei representation."""
return int(value * (Decimal(10) ** decimals))
def get_gas_params(w3: Web3) -> Dict[str, int]:
"""Get dynamic gas parameters for EIP-1559."""
latest_block = w3.eth.get_block("latest")
base_fee = latest_block['baseFeePerGas']
# Priority fee: node-suggested value, falling back to 0.1 gwei
max_priority_fee = w3.eth.max_priority_fee or Web3.to_wei(0.1, 'gwei')
# Max Fee = Base Fee * 1.25 + Priority Fee
max_fee = int(base_fee * 1.25) + max_priority_fee
return {
'maxFeePerGas': max_fee,
'maxPriorityFeePerGas': max_priority_fee
}
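# Illustrative note (added commentary, not from the original author): with the formula above,
# a base fee of 0.02 gwei and a priority fee of 0.1 gwei would yield
# maxFeePerGas = 0.02 * 1.25 + 0.1 = 0.125 gwei. The 1.25x headroom mainly protects against
# a base-fee increase between building the transaction and its inclusion in a block.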
def send_transaction_robust(
w3: Web3,
account: LocalAccount,
func_call: Any,
value: int = 0,
gas_limit: Optional[int] = None,
extra_msg: str = ""
) -> Optional[Any]:
"""
Builds, signs, sends, and waits for a transaction with timeout and status check.
"""
try:
# 1. Prepare Params
tx_params = {
'from': account.address,
'nonce': w3.eth.get_transaction_count(account.address),
'value': value,
'chainId': w3.eth.chain_id,
}
# 2. Add Gas Params
gas_fees = get_gas_params(w3)
tx_params.update(gas_fees)
# 3. Simulate (Call) & Estimate Gas
try:
# If function call object provided
if hasattr(func_call, 'call'):
func_call.call({'from': account.address, 'value': value}) # Safety Dry-Run
estimated_gas = func_call.estimate_gas({'from': account.address, 'value': value})
else:
# Raw transaction construction if func_call is just params dict (rare here)
estimated_gas = 200000
tx_params['gas'] = gas_limit if gas_limit else int(estimated_gas * 1.2) # 20% buffer
# Build
if hasattr(func_call, 'build_transaction'):
tx = func_call.build_transaction(tx_params)
else:
raise ValueError("Invalid function call object")
except ContractLogicError as e:
logger.error(f"❌ Simulation/Estimation failed for {extra_msg}: {e}")
return None
# 4. Sign
signed_tx = account.sign_transaction(tx)
# 5. Send
tx_hash = w3.eth.send_raw_transaction(signed_tx.raw_transaction)
logger.info(f"📤 Sent {extra_msg} | Hash: {tx_hash.hex()}")
# 6. Wait for Receipt
receipt = w3.eth.wait_for_transaction_receipt(tx_hash, timeout=TRANSACTION_TIMEOUT_SECONDS)
# 7. Verify Status
if receipt.status == 1:
logger.info(f"✅ Executed {extra_msg} | Block: {receipt.blockNumber}")
return receipt
else:
logger.error(f"❌ Transaction Reverted {extra_msg} | Hash: {tx_hash.hex()}")
return None
except TimeExhausted:
logger.error(f"⌛ Transaction Timeout {extra_msg} - Check Mempool")
# In a full production bot, we would implement gas bumping here.
return None
except Exception as e:
logger.error(f"❌ Transaction Error {extra_msg}: {e}")
return None
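# Added commentary: typical usage is
#   send_transaction_robust(w3, account, contract.functions.someCall(args), extra_msg="...")
# where "someCall" is a placeholder, not a function from this project. The helper dry-runs the
# call, adds a 20% gas buffer to the estimate, signs locally, and only returns a receipt when
# the on-chain status is 1; timeouts and reverts return None.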
def price_from_sqrt_price_x96(sqrt_price_x96: int, token0_decimals: int, token1_decimals: int) -> Decimal:
"""
Returns price of Token0 in terms of Token1.
"""
sqrt_price = Decimal(sqrt_price_x96)
q96 = Decimal(2) ** 96
price = (sqrt_price / q96) ** 2
# Adjust for decimals: Price = (T1 / 10^d1) / (T0 / 10^d0)
# = (T1/T0) * (10^d0 / 10^d1)
adjustment = Decimal(10) ** (token0_decimals - token1_decimals)
return price * adjustment
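# Worked example (illustrative values, added commentary): for the Arbitrum WETH/USDC pool
# (token0 = WETH, 18 decimals; token1 = USDC, 6 decimals), sqrtPriceX96 ~= 4.34e24 gives
# (4.34e24 / 2**96)**2 ~= 3.0e-9 raw, and the decimal adjustment of 10**(18-6) scales this
# to roughly 3000 USDC per WETH.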
def price_from_tick(tick: int, token0_decimals: int, token1_decimals: int) -> Decimal:
price = Decimal("1.0001") ** tick
adjustment = Decimal(10) ** (token0_decimals - token1_decimals)
return price * adjustment
def get_sqrt_ratio_at_tick(tick: int) -> int:
return int((1.0001 ** (tick / 2)) * (2 ** 96))
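# Note (added commentary): get_sqrt_ratio_at_tick is a float approximation of the exact
# integer TickMath used on-chain (sqrtRatioX96 = 1.0001**(tick/2) * 2**96). The error is
# negligible for sizing deposits and checking ranges, but the result should not be expected
# to match on-chain sqrt ratios bit-for-bit.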
def get_amounts_for_liquidity(sqrt_ratio_current: int, sqrt_ratio_a: int, sqrt_ratio_b: int, liquidity: int) -> Tuple[int, int]:
if sqrt_ratio_a > sqrt_ratio_b:
sqrt_ratio_a, sqrt_ratio_b = sqrt_ratio_b, sqrt_ratio_a
amount0 = 0
amount1 = 0
Q96 = 1 << 96
# Calculations performed in high-precision integer math (EVM style)
if sqrt_ratio_current <= sqrt_ratio_a:
amount0 = (liquidity * Q96 // sqrt_ratio_a) - (liquidity * Q96 // sqrt_ratio_b)
amount1 = 0
elif sqrt_ratio_current < sqrt_ratio_b:
amount0 = (liquidity * Q96 // sqrt_ratio_current) - (liquidity * Q96 // sqrt_ratio_b)
amount1 = (liquidity * (sqrt_ratio_current - sqrt_ratio_a)) // Q96
else:
amount1 = (liquidity * (sqrt_ratio_b - sqrt_ratio_a)) // Q96
amount0 = 0
return amount0, amount1
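# Added commentary: the three branches above mirror Uniswap V3's LiquidityAmounts logic.
# Below the range only token0 is required, above the range only token1, and inside the range
# the position needs both: amount0 covers the current tick up to the upper bound and
# amount1 covers the lower bound up to the current tick.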
# --- CORE LOGIC ---
def get_position_details(w3: Web3, npm_contract, factory_contract, token_id: int):
try:
# No ownership check needed: positions() can be queried for any tokenId regardless of owner.
position_data = npm_contract.functions.positions(token_id).call()
(nonce, operator, token0_address, token1_address, fee, tickLower, tickUpper, liquidity,
feeGrowthInside0, feeGrowthInside1, tokensOwed0, tokensOwed1) = position_data
token0_contract = w3.eth.contract(address=token0_address, abi=ERC20_ABI)
token1_contract = w3.eth.contract(address=token1_address, abi=ERC20_ABI)
# Multi-call optimization could be used here, but keeping simple for now
token0_symbol = token0_contract.functions.symbol().call()
token1_symbol = token1_contract.functions.symbol().call()
token0_decimals = token0_contract.functions.decimals().call()
token1_decimals = token1_contract.functions.decimals().call()
pool_address = factory_contract.functions.getPool(token0_address, token1_address, fee).call()
if pool_address == '0x0000000000000000000000000000000000000000':
return None, None
pool_contract = w3.eth.contract(address=pool_address, abi=UNISWAP_V3_POOL_ABI)
return {
"token0_address": token0_address, "token1_address": token1_address,
"token0_symbol": token0_symbol, "token1_symbol": token1_symbol,
"token0_decimals": token0_decimals, "token1_decimals": token1_decimals,
"fee": fee, "tickLower": tickLower, "tickUpper": tickUpper, "liquidity": liquidity,
"pool_address": pool_address
}, pool_contract
except Exception as e:
logger.error(f"❌ Error fetching position details for ID {token_id}: {e}")
return None, None
def get_pool_dynamic_data(pool_contract) -> Optional[Dict[str, Any]]:
try:
slot0 = pool_contract.functions.slot0().call()
return {"sqrtPriceX96": slot0[0], "tick": slot0[1]}
except Exception as e:
logger.error(f"❌ Pool data fetch failed: {e}")
return None
def calculate_mint_amounts(current_tick, tick_lower, tick_upper, investment_value_token1: Decimal, decimals0, decimals1, sqrt_price_current_x96) -> Tuple[int, int]:
"""
Calculates required token amounts for a target investment value (denominated in token1).
Value scaling uses Decimal arithmetic; the sqrt ratios come from the float-based tick helper.
"""
sqrt_price_current = get_sqrt_ratio_at_tick(current_tick)
sqrt_price_lower = get_sqrt_ratio_at_tick(tick_lower)
sqrt_price_upper = get_sqrt_ratio_at_tick(tick_upper)
# Price of T0 in T1
price_t0_in_t1 = price_from_sqrt_price_x96(sqrt_price_current_x96, decimals0, decimals1)
# Calculate amounts for a "Test" liquidity amount
L_test = 1 << 128
amt0_test_wei, amt1_test_wei = get_amounts_for_liquidity(sqrt_price_current, sqrt_price_lower, sqrt_price_upper, L_test)
amt0_test = Decimal(amt0_test_wei) / Decimal(10**decimals0)
amt1_test = Decimal(amt1_test_wei) / Decimal(10**decimals1)
# Value in Token1 terms
value_test = (amt0_test * price_t0_in_t1) + amt1_test
if value_test <= 0:
return 0, 0
scale = investment_value_token1 / value_test
final_amt0_wei = int(Decimal(amt0_test_wei) * scale)
final_amt1_wei = int(Decimal(amt1_test_wei) * scale)
return final_amt0_wei, final_amt1_wei
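# Added commentary: the function above sizes the deposit by pricing a "probe" position of
# liquidity L_test = 2**128, valuing it in token1 terms, and linearly scaling both token
# amounts so the total value matches the target. For example, if the probe position is worth
# 1,000,000 USDC and the target is 200 USDC, both probe amounts are multiplied by 0.0002.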
def ensure_allowance(w3: Web3, account: LocalAccount, token_address: str, spender_address: str, amount_needed: int) -> bool:
"""
Checks if allowance is sufficient, approves if not.
"""
try:
token_c = w3.eth.contract(address=token_address, abi=ERC20_ABI)
allowance = token_c.functions.allowance(account.address, spender_address).call()
if allowance >= amount_needed:
return True
logger.info(f"🔓 Approving {token_address} for {spender_address}...")
# Some tokens (USDT) fail if approving from non-zero to non-zero.
# Safe practice: Approve 0 first if allowance > 0, then new amount.
if allowance > 0:
send_transaction_robust(w3, account, token_c.functions.approve(spender_address, 0), extra_msg="Reset Allowance")
# Approve
receipt = send_transaction_robust(
w3, account,
token_c.functions.approve(spender_address, amount_needed),
extra_msg=f"Approve {token_address}"
)
return receipt is not None
except Exception as e:
logger.error(f"❌ Allowance check/approve failed: {e}")
return False
def check_and_swap_for_deposit(w3: Web3, router_contract, account: LocalAccount, token0: str, token1: str, amount0_needed: int, amount1_needed: int, sqrt_price_x96: int, d0: int, d1: int) -> bool:
"""
Checks balances, wraps ETH if needed, and swaps ONLY the required surplus to meet deposit requirements.
"""
token0 = clean_address(token0)
token1 = clean_address(token1)
token0_c = w3.eth.contract(address=token0, abi=ERC20_ABI)
token1_c = w3.eth.contract(address=token1, abi=ERC20_ABI)
bal0 = token0_c.functions.balanceOf(account.address).call()
bal1 = token1_c.functions.balanceOf(account.address).call()
# Calculate Deficits
deficit0 = max(0, amount0_needed - bal0)
deficit1 = max(0, amount1_needed - bal1)
weth_lower = WETH_ADDRESS.lower()
# --- AUTO WRAP ETH ---
if (deficit0 > 0 and token0.lower() == weth_lower) or (deficit1 > 0 and token1.lower() == weth_lower):
eth_bal = w3.eth.get_balance(account.address)
# Keep 0.01 ETH for gas
gas_reserve = Web3.to_wei(0.01, 'ether')
available_eth = max(0, eth_bal - gas_reserve)
wrap_needed = 0
if token0.lower() == weth_lower: wrap_needed += deficit0
if token1.lower() == weth_lower: wrap_needed += deficit1
amount_to_wrap = min(wrap_needed, available_eth)
if amount_to_wrap > 0:
logger.info(f"🌯 Wrapping {Web3.from_wei(amount_to_wrap, 'ether')} ETH...")
weth_c = w3.eth.contract(address=WETH_ADDRESS, abi=WETH9_ABI)
receipt = send_transaction_robust(w3, account, weth_c.functions.deposit(), value=amount_to_wrap, extra_msg="Wrap ETH")
if receipt:
# Refresh Balances
bal0 = token0_c.functions.balanceOf(account.address).call()
bal1 = token1_c.functions.balanceOf(account.address).call()
deficit0 = max(0, amount0_needed - bal0)
deficit1 = max(0, amount1_needed - bal1)
if deficit0 == 0 and deficit1 == 0:
return True
# --- SWAP SURPLUS ---
# Smart Swap: Calculate exactly how much we need to swap
# Price of Token0 in terms of Token1
price_0_in_1 = price_from_sqrt_price_x96(sqrt_price_x96, d0, d1)
swap_call = None
token_in, token_out = None, None
amount_in = 0
buffer_multiplier = Decimal("1.02") # 2% buffer for slippage/price moves
if deficit0 > 0 and bal1 > amount1_needed:
# Need T0 (ETH), Have extra T1 (USDC)
# Swap T1 -> T0
# Cost in T1 = Deficit0 * Price(T0 in T1)
cost_in_t1 = Decimal(deficit0) / Decimal(10**d0) * price_0_in_1
# Convert back to T1 Wei and apply buffer
amount_in_needed = int(cost_in_t1 * Decimal(10**d1) * buffer_multiplier)
surplus1 = bal1 - amount1_needed
if surplus1 >= amount_in_needed:
token_in, token_out = token1, token0
amount_in = amount_in_needed
logger.info(f"🧮 Calc: Need {deficit0} T0. Cost ~{amount_in_needed} T1. Surplus: {surplus1}")
else:
logger.warning(f"❌ Insufficient Surplus T1. Need {amount_in_needed}, Have {surplus1}")
elif deficit1 > 0 and bal0 > amount0_needed:
# Need T1 (USDC), Have extra T0 (ETH)
# Swap T0 -> T1
# Cost in T0 = Deficit1 / Price(T0 in T1)
if price_0_in_1 > 0:
cost_in_t0 = (Decimal(deficit1) / Decimal(10**d1)) / price_0_in_1
amount_in_needed = int(cost_in_t0 * Decimal(10**d0) * buffer_multiplier)
surplus0 = bal0 - amount0_needed
if surplus0 >= amount_in_needed:
token_in, token_out = token0, token1
amount_in = amount_in_needed
logger.info(f"🧮 Calc: Need {deficit1} T1. Cost ~{amount_in_needed} T0. Surplus: {surplus0}")
else:
logger.warning(f"❌ Insufficient Surplus T0. Need {amount_in_needed}, Have {surplus0}")
if token_in and amount_in > 0:
logger.info(f"🔄 Swapping {amount_in} {token_in} to cover deficit...")
if not ensure_allowance(w3, account, token_in, UNISWAP_V3_SWAP_ROUTER_ADDRESS, amount_in):
return False
params = (
token_in, token_out, 500, account.address,
int(time.time()) + 120,
amount_in,
0, # amountOutMinimum = 0: no slippage floor on this surplus swap
0
)
receipt = send_transaction_robust(w3, account, router_contract.functions.exactInputSingle(params), extra_msg="Swap Surplus")
if receipt:
# Final balance check after the swap; no retry loop here, just report success or failure
bal0 = token0_c.functions.balanceOf(account.address).call()
bal1 = token1_c.functions.balanceOf(account.address).call()
# If we are strictly >= needed, great.
if bal0 >= amount0_needed and bal1 >= amount1_needed:
return True
else:
logger.warning(f"⚠️ Swap executed but still short? T0: {bal0}/{amount0_needed}, T1: {bal1}/{amount1_needed}")
return False
logger.warning(f"❌ Insufficient funds (No suitable swap found). T0: {bal0}/{amount0_needed}, T1: {bal1}/{amount1_needed}")
return False
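# Added commentary (illustrative numbers): suppose the mint needs 0.05 WETH + 150 USDC but the
# wallet holds 0.02 WETH + 300 USDC with WETH at 3000 USDC. The deficit is 0.03 WETH, costing
# about 0.03 * 3000 * 1.02 = 91.8 USDC with the 2% buffer; since the USDC surplus (150 USDC)
# covers that, the function swaps ~91.8 USDC into WETH and re-checks the balances.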
def mint_new_position(w3: Web3, npm_contract, account: LocalAccount, token0: str, token1: str, amount0: int, amount1: int, tick_lower: int, tick_upper: int) -> Optional[Dict]:
"""
Approves tokens and mints a new V3 position.
"""
logger.info("🚀 Minting new position...")
# 1. Approve
if not ensure_allowance(w3, account, token0, NONFUNGIBLE_POSITION_MANAGER_ADDRESS, amount0): return None
if not ensure_allowance(w3, account, token1, NONFUNGIBLE_POSITION_MANAGER_ADDRESS, amount1): return None
# 2. Calculate Min Amounts (Slippage Protection)
# Derived from SLIPPAGE_TOLERANCE (currently 2%)
amount0_min = int(Decimal(amount0) * (Decimal(1) - SLIPPAGE_TOLERANCE))
amount1_min = int(Decimal(amount1) * (Decimal(1) - SLIPPAGE_TOLERANCE))
# 3. Mint
params = (
token0, token1, 500,
tick_lower, tick_upper,
amount0, amount1,
amount0_min, amount1_min,
account.address,
int(time.time()) + 180
)
receipt = send_transaction_robust(w3, account, npm_contract.functions.mint(params), extra_msg="Mint Position")
if receipt and receipt.status == 1:
# Parse Logs
try:
# Transfer Event (Topic0)
transfer_topic = Web3.keccak(text="Transfer(address,address,uint256)").hex()
# IncreaseLiquidity Event (Topic0)
increase_liq_topic = Web3.keccak(text="IncreaseLiquidity(uint256,uint128,uint256,uint256)").hex()
minted_data = {'token_id': None, 'amount0': 0, 'amount1': 0}
for log in receipt.logs:
topics = [t.hex() for t in log['topics']]
# Capture Token ID
if topics[0] == transfer_topic:
if "0000000000000000000000000000000000000000" in topics[1]:
minted_data['token_id'] = int(topics[3], 16)
# Capture Amounts
if topics[0] == increase_liq_topic:
# decoding data: liquidity(uint128), amount0(uint256), amount1(uint256)
# data is a single hex string, we need to decode it
data = log['data'].hex()
if data.startswith('0x'):
data = data[2:]
# liquidity is first 32 bytes (padded), amt0 next 32, amt1 next 32
minted_data['amount0'] = int(data[64:128], 16)
minted_data['amount1'] = int(data[128:192], 16)
if minted_data['token_id']:
# Format for log output.
# Decimals are hardcoded for this WETH (18) / USDC (6) pair; ideally they would be
# passed in from the token metadata fetched elsewhere.
d0, d1 = 18, 6
fmt_amt0 = Decimal(minted_data['amount0']) / Decimal(10**d0)
fmt_amt1 = Decimal(minted_data['amount1']) / Decimal(10**d1)
logger.info(f"✅ POSITION OPENED | ID: {minted_data['token_id']} | Deposited: {fmt_amt0:.6f} WETH + {fmt_amt1:.2f} USDC")
return minted_data
except Exception as e:
logger.warning(f"Minted but failed to parse details: {e}")
return None
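# Added commentary: the receipt parsing above decodes the position manager events by hand.
# It assumes the standard Uniswap V3 layouts: Transfer(from, to, tokenId) with a zero "from"
# address marking the mint, and IncreaseLiquidity data packed as three 32-byte words
# (liquidity, amount0, amount1), hence the 64-hex-character slices.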
def decrease_liquidity(w3: Web3, npm_contract, account: LocalAccount, token_id: int, liquidity: int) -> bool:
if liquidity == 0: return True
logger.info(f"📉 Decreasing Liquidity for {token_id}...")
params = (
token_id,
liquidity,
0, 0, # amountMin0, amountMin1
int(time.time()) + 180
)
receipt = send_transaction_robust(w3, account, npm_contract.functions.decreaseLiquidity(params), extra_msg=f"Decrease Liq {token_id}")
if receipt and receipt.status == 1:
try:
# Parse DecreaseLiquidity Event
decrease_topic = Web3.keccak(text="DecreaseLiquidity(uint256,uint128,uint256,uint256)").hex()
amt0, amt1 = 0, 0
for log in receipt.logs:
topics = [t.hex() for t in log['topics']]
if topics[0] == decrease_topic:
# Check tokenID (topic 1)
if int(topics[1], 16) == token_id:
data = log['data'].hex()
if data.startswith('0x'):
data = data[2:]
# liquidity (32), amt0 (32), amt1 (32)
amt0 = int(data[64:128], 16)
amt1 = int(data[128:192], 16)
break
d0, d1 = 18, 6 # Assuming WETH/USDC
fmt_amt0 = Decimal(amt0) / Decimal(10**d0)
fmt_amt1 = Decimal(amt1) / Decimal(10**d1)
logger.info(f"📉 POSITION CLOSED (Liquidity Removed) | ID: {token_id} | Withdrawn: {fmt_amt0:.6f} WETH + {fmt_amt1:.2f} USDC")
except Exception as e:
logger.warning(f"Closed but failed to parse details: {e}")
return True
return False
def collect_fees(w3: Web3, npm_contract, account: LocalAccount, token_id: int) -> bool:
logger.info(f"💰 Collecting Fees for {token_id}...")
max_val = 2**128 - 1
params = (
token_id,
account.address,
max_val, max_val
)
receipt = send_transaction_robust(w3, account, npm_contract.functions.collect(params), extra_msg=f"Collect Fees {token_id}")
return receipt is not None
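# Added commentary: passing 2**128 - 1 for both amountMax fields asks the position manager to
# transfer everything currently owed (principal released by decreaseLiquidity plus accrued
# fees) to the wallet; decreaseLiquidity alone only books those amounts as tokensOwed.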
# --- STATE MANAGEMENT ---
def load_status_data() -> List[Dict]:
if not os.path.exists(STATUS_FILE):
return []
try:
with open(STATUS_FILE, 'r') as f:
return json.load(f)
except (json.JSONDecodeError, OSError):
return []
def save_status_data(data: List[Dict]):
with open(STATUS_FILE, 'w') as f:
json.dump(data, f, indent=2)
def update_position_status(token_id: int, status: str, extra_data: Optional[Dict] = None):
data = load_status_data()
# Find existing or create new
entry = next((item for item in data if item.get('token_id') == token_id), None)
if not entry:
if status in ["OPEN", "PENDING_HEDGE"]:
entry = {"type": "AUTOMATIC", "token_id": token_id}
data.append(entry)
else:
return # Can't update non-existent position unless opening
entry['status'] = status
entry.update(extra_data or {})
if status == "CLOSED":
entry['timestamp_close'] = int(time.time())
save_status_data(data)
logger.info(f"💾 Updated Position {token_id} status to {status}")
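# Added commentary: hedge_status.json holds a list of position entries. Based on the fields
# written above and in main(), a typical entry looks roughly like:
# {"type": "AUTOMATIC", "token_id": 123456, "status": "OPEN", "target_value": 200.0,
#  "entry_price": 3000.0, "amount0_initial": 0.0333, "amount1_initial": 100.0,
#  "range_upper": 3015.0, "range_lower": 2985.0, "timestamp_open": 1734633000}
# (token_id and the numeric values here are placeholders, not real data).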
# --- MAIN LOOP ---
def main():
logger.info("🔷 Uniswap Manager V2 (Refactored) Starting...")
load_dotenv(override=True)
rpc_url = os.environ.get("MAINNET_RPC_URL")
private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY") or os.environ.get("PRIVATE_KEY")
if not rpc_url or not private_key:
logger.error("❌ Missing RPC or Private Key in .env")
return
w3 = Web3(Web3.HTTPProvider(rpc_url))
if not w3.is_connected():
logger.error("❌ Could not connect to RPC")
return
account = Account.from_key(private_key)
logger.info(f"👤 Wallet: {account.address}")
# Contracts
npm = w3.eth.contract(address=clean_address(NONFUNGIBLE_POSITION_MANAGER_ADDRESS), abi=NONFUNGIBLE_POSITION_MANAGER_ABI)
factory_addr = npm.functions.factory().call()
factory = w3.eth.contract(address=factory_addr, abi=UNISWAP_V3_FACTORY_ABI)
router = w3.eth.contract(address=clean_address(UNISWAP_V3_SWAP_ROUTER_ADDRESS), abi=SWAP_ROUTER_ABI)
while True:
try:
status_data = load_status_data()
open_positions = [p for p in status_data if p.get('status') == 'OPEN']
active_auto_pos = next((p for p in open_positions if p.get('type') == 'AUTOMATIC'), None)
if active_auto_pos:
token_id = active_auto_pos['token_id']
pos_details, pool_c = get_position_details(w3, npm, factory, token_id)
if pos_details:
pool_data = get_pool_dynamic_data(pool_c)
if not pool_data:
raise RuntimeError(f"Pool data fetch failed for position {token_id}")
current_tick = pool_data['tick']
# Check Range
tick_lower = pos_details['tickLower']
tick_upper = pos_details['tickUpper']
in_range = tick_lower <= current_tick < tick_upper
# Calculate Prices for logging
current_price = price_from_tick(current_tick, pos_details['token0_decimals'], pos_details['token1_decimals'])
lower_price = price_from_tick(tick_lower, pos_details['token0_decimals'], pos_details['token1_decimals'])
upper_price = price_from_tick(tick_upper, pos_details['token0_decimals'], pos_details['token1_decimals'])
status_msg = "✅ IN RANGE" if in_range else "⚠️ OUT OF RANGE"
# Calculate Unclaimed Fees (Simulation)
unclaimed0, unclaimed1, total_fees_usd = 0, 0, 0
try:
# Call collect with zero address to simulate fee estimation
fees_sim = npm.functions.collect((token_id, "0x0000000000000000000000000000000000000000", 2**128-1, 2**128-1)).call({'from': account.address})
unclaimed0 = to_decimal(fees_sim[0], pos_details['token0_decimals'])
unclaimed1 = to_decimal(fees_sim[1], pos_details['token1_decimals'])
total_fees_usd = (unclaimed0 * current_price) + unclaimed1
except Exception as e:
logger.debug(f"Fee simulation failed for {token_id}: {e}")
fee_text = f" | Fees: {unclaimed0:.4f}/{unclaimed1:.2f} (~${total_fees_usd:.2f})"
logger.info(f"Position {token_id}: {status_msg} | Price: {current_price:.4f} [{lower_price:.4f} - {upper_price:.4f}]{fee_text}")
if not in_range and CLOSE_POSITION_ENABLED:
logger.warning(f"🛑 Closing Position {token_id} (Out of Range)")
update_position_status(token_id, "CLOSING")
# 1. Remove Liquidity
if decrease_liquidity(w3, npm, account, token_id, pos_details['liquidity']):
# 2. Collect Fees
collect_fees(w3, npm, account, token_id)
update_position_status(token_id, "CLOSED")
# 3. Optional Rebalance (Sell 50% WETH if fell below)
if REBALANCE_ON_CLOSE_BELOW_RANGE and current_tick < tick_lower:
# Simple rebalance logic here (similar to original check_and_swap surplus logic)
pass
elif OPEN_POSITION_ENABLED:
logger.info("🔍 No active position. Analyzing market...")
# Setup logic for new position
token0 = clean_address(WETH_ADDRESS)
token1 = clean_address(USDC_ADDRESS)
fee = 500
pool_addr = factory.functions.getPool(token0, token1, fee).call()
pool_c = w3.eth.contract(address=pool_addr, abi=UNISWAP_V3_POOL_ABI)
pool_data = get_pool_dynamic_data(pool_c)
if pool_data:
tick = pool_data['tick']
# Define Range (+/- RANGE_WIDTH_PCT, currently 0.5%)
# tick_delta = log(1 + width) / log(1.0001); ~50 ticks for a 0.5% width
tick_delta = int(math.log(1 + float(RANGE_WIDTH_PCT)) / math.log(1.0001))
tick_spacing = 10
tick_lower = (tick - tick_delta) // tick_spacing * tick_spacing
tick_upper = (tick + tick_delta) // tick_spacing * tick_spacing
# Calculate Amounts
# Target Value logic
d0 = 18 # WETH
d1 = 6 # USDC
if str(TARGET_INVESTMENT_VALUE_USDC).upper() == "MAX":
# Fetch balances
token0_c = w3.eth.contract(address=token0, abi=ERC20_ABI)
token1_c = w3.eth.contract(address=token1, abi=ERC20_ABI)
bal0 = Decimal(token0_c.functions.balanceOf(account.address).call()) / Decimal(10**d0)
bal1 = Decimal(token1_c.functions.balanceOf(account.address).call()) / Decimal(10**d1)
price_eth_usdc = price_from_sqrt_price_x96(pool_data['sqrtPriceX96'], d0, d1)
total_val_usd = (bal0 * price_eth_usdc) + bal1
# Apply Buffer ($200)
investment_val_dec = max(Decimal(0), total_val_usd - Decimal(200))
logger.info(f"🎯 MAX Investment Mode: Wallet ${total_val_usd:.2f} -> Target ${investment_val_dec:.2f} (Buffer $200)")
else:
investment_val_dec = Decimal(str(TARGET_INVESTMENT_VALUE_USDC))
amt0, amt1 = calculate_mint_amounts(tick, tick_lower, tick_upper, investment_val_dec, d0, d1, pool_data['sqrtPriceX96'])
if check_and_swap_for_deposit(w3, router, account, token0, token1, amt0, amt1, pool_data['sqrtPriceX96'], d0, d1):
minted = mint_new_position(w3, npm, account, token0, token1, amt0, amt1, tick_lower, tick_upper)
if minted:
# Calculate entry price and amounts for JSON compatibility
entry_price = float(price_from_sqrt_price_x96(pool_data['sqrtPriceX96'], d0, d1))
fmt_amt0 = float(Decimal(minted['amount0']) / Decimal(10**d0))
fmt_amt1 = float(Decimal(minted['amount1']) / Decimal(10**d1))
# Calculate actual initial value
actual_value = (fmt_amt0 * entry_price) + fmt_amt1
# Prepare ordered data with specific rounding
new_position_data = {
"type": "AUTOMATIC", # Will be handled by update_position_status logic if new
"target_value": round(float(actual_value), 2),
"entry_price": round(entry_price, 2),
"amount0_initial": round(fmt_amt0, 4),
"amount1_initial": round(fmt_amt1, 2),
"range_upper": round(float(price_from_tick(tick_upper, d0, d1)), 2),
"range_lower": round(float(price_from_tick(tick_lower, d0, d1)), 2),
"timestamp_open": int(time.time())
}
update_position_status(minted['token_id'], "OPEN", new_position_data)
time.sleep(MONITOR_INTERVAL_SECONDS)
except KeyboardInterrupt:
logger.info("👋 Exiting...")
break
except Exception as e:
logger.error(f"❌ Main Loop Error: {e}")
time.sleep(MONITOR_INTERVAL_SECONDS)
if __name__ == "__main__":
main()