diff --git a/aerodrome/tools/create_agent.py b/aerodrome/tools/create_agent.py
new file mode 100644
index 0000000..dcc4e9d
--- /dev/null
+++ b/aerodrome/tools/create_agent.py
@@ -0,0 +1,70 @@
+import os
+from eth_account import Account
+from hyperliquid.exchange import Exchange
+from hyperliquid.utils import constants
+from dotenv import load_dotenv
+
+# Load environment variables from a .env file if it exists
+load_dotenv()
+
+def create_and_authorize_agent():
+    """
+    Creates and authorizes a new agent key pair using your main wallet,
+    following the correct SDK pattern.
+    """
+    # --- STEP 1: Load your main wallet ---
+    # This is the wallet that holds the funds and has been activated on Hyperliquid.
+    main_wallet_private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY")
+    if not main_wallet_private_key:
+        main_wallet_private_key = input("Please enter the private key of your MAIN trading wallet: ")
+
+    try:
+        main_account = Account.from_key(main_wallet_private_key)
+        print(f"\nāœ… Loaded main wallet: {main_account.address}")
+    except Exception as e:
+        print(f"āŒ Error: Invalid main wallet private key provided. Details: {e}")
+        return
+
+    # --- STEP 2: Initialize the Exchange with your MAIN account ---
+    # This object is used to send the authorization transaction.
+    exchange = Exchange(main_account, constants.MAINNET_API_URL, account_address=main_account.address)
+
+    # --- STEP 3: Create and approve the agent with a specific name ---
+    # Agent names must be between 1 and 16 characters long.
+    agent_name = "my_new_agent"
+
+    print(f"\nšŸ”— Authorizing a new agent named '{agent_name}'...")
+    try:
+        # approve_agent expects only the agent name string and returns the
+        # API response together with the newly generated agent private key.
+        approve_result, agent_private_key = exchange.approve_agent(agent_name)
+
+        if approve_result.get("status") == "ok":
+            # Derive the agent's public address from the key we received
+            agent_account = Account.from_key(agent_private_key)
+
+            print("\nšŸŽ‰ SUCCESS! Agent has been authorized on-chain.")
+            print("=" * 50)
+            print("SAVE THESE SECURELY. This is what your bot will use.")
+            print(f"    Name: {agent_name}")
+            print("    (Agent has a default long-term validity)")
+            print(f"šŸ”‘ Agent Private Key: {agent_private_key}")
+            print(f"šŸ  Agent Address: {agent_account.address}")
+            print("=" * 50)
+            print("\nYou can now set this private key as the AGENT_PRIVATE_KEY environment variable.")
+        else:
+            print("\nāŒ ERROR: Agent authorization failed.")
+            print("    Response:", approve_result)
+            if "Vault may not perform this action" in str(approve_result):
+                print("\n    ACTION REQUIRED: This error means your main wallet (vault) has not been activated. "
+                      "Please go to the Hyperliquid website, connect this wallet, and make a deposit to activate it.")
+
+    except Exception as e:
+        print(f"\nAn unexpected error occurred during authorization: {e}")
+
+
+if __name__ == "__main__":
+    create_and_authorize_agent()
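+
+# --- Example (untested sketch): how a bot process can use the saved key ---
+# The agent key signs requests, while `account_address` points at the main
+# wallet whose funds are traded. AGENT_PRIVATE_KEY matches the variable name
+# printed above; MAIN_WALLET_ADDRESS is a hypothetical name for illustration.
+#
+#     agent_account = Account.from_key(os.environ["AGENT_PRIVATE_KEY"])
+#     exchange = Exchange(
+#         agent_account,
+#         constants.MAINNET_API_URL,
+#         account_address=os.environ["MAIN_WALLET_ADDRESS"],
+#     )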
" + "Please go to the Hyperliquid website, connect this wallet, and make a deposit to activate it.") + + + except Exception as e: + print(f"\nAn unexpected error occurred during authorization: {e}") + + +if __name__ == "__main__": + create_and_authorize_agent() + diff --git a/aerodrome/tools/kpi_tracker.py b/aerodrome/tools/kpi_tracker.py new file mode 100644 index 0000000..dda3f3e --- /dev/null +++ b/aerodrome/tools/kpi_tracker.py @@ -0,0 +1,134 @@ +import os +import csv +import time +import logging +from decimal import Decimal +from typing import Dict, Optional + +# Setup Logger +logger = logging.getLogger("KPI_TRACKER") +logger.setLevel(logging.INFO) +# Basic handler if not already handled by parent +if not logger.handlers: + ch = logging.StreamHandler() + formatter = logging.Formatter('%(asctime)s - KPI - %(message)s') + ch.setFormatter(formatter) + logger.addHandler(ch) + +KPI_FILE = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs', 'kpi_history.csv') + +def initialize_kpi_csv(): + """Creates the CSV with headers if it doesn't exist.""" + if not os.path.exists(os.path.dirname(KPI_FILE)): + os.makedirs(os.path.dirname(KPI_FILE)) + + if not os.path.exists(KPI_FILE): + with open(KPI_FILE, 'w', newline='') as f: + writer = csv.writer(f) + writer.writerow([ + "Timestamp", + "Date", + "NAV_Total_USD", + "Benchmark_HODL_USD", + "Alpha_USD", + "Uniswap_Val_USD", + "Uniswap_Fees_Claimed_USD", + "Uniswap_Fees_Unclaimed_USD", + "Hedge_Equity_USD", + "Hedge_PnL_Realized_USD", + "Hedge_Fees_Paid_USD", + "ETH_Price", + "Fee_Coverage_Ratio" + ]) + +def calculate_hodl_benchmark(initial_eth: Decimal, initial_usdc: Decimal, initial_hedge_usdc: Decimal, current_eth_price: Decimal) -> Decimal: + """Calculates value if assets were just held (Wallet Assets + Hedge Account Cash).""" + return (initial_eth * current_eth_price) + initial_usdc + initial_hedge_usdc + +def log_kpi_snapshot( + snapshot_data: Dict[str, float] +): + """ + Logs a KPI snapshot to CSV. + Expected keys in snapshot_data: + - initial_eth, initial_usdc, initial_hedge_usdc + - current_eth_price + - uniswap_pos_value_usd + - uniswap_fees_claimed_usd + - uniswap_fees_unclaimed_usd + - hedge_equity_usd + - hedge_pnl_realized_usd + - hedge_fees_paid_usd + - wallet_eth_bal, wallet_usdc_bal (Optional, for full NAV) + """ + try: + initialize_kpi_csv() + + # Convert all inputs to Decimal for precision + price = Decimal(str(snapshot_data.get('current_eth_price', 0))) + + # 1. Benchmark (HODL) + init_eth = Decimal(str(snapshot_data.get('initial_eth', 0))) + init_usdc = Decimal(str(snapshot_data.get('initial_usdc', 0))) + init_hedge = Decimal(str(snapshot_data.get('initial_hedge_usdc', 0))) + benchmark_val = calculate_hodl_benchmark(init_eth, init_usdc, init_hedge, price) + + # 2. Strategy NAV (Net Asset Value) + # NAV = Uni Pos + Uni Fees (Claimed+Unclaimed) + Hedge Equity + (Wallet Surplus - Initial Wallet Surplus?) + # For simplicity, we focus on the Strategy PnL components: + # Strategy Val = (Current Uni Pos) + (Claimed Fees) + (Unclaimed Fees) + (Hedge PnL Realized) + (Hedge Unrealized?) + # Note: Hedge Equity usually includes margin. We strictly want "Value Generated". 
+
+def log_kpi_snapshot(
+    snapshot_data: Dict[str, float]
+):
+    """
+    Logs a KPI snapshot to CSV.
+    Expected keys in snapshot_data:
+    - initial_eth, initial_usdc, initial_hedge_usdc
+    - current_eth_price
+    - uniswap_pos_value_usd
+    - uniswap_fees_claimed_usd
+    - uniswap_fees_unclaimed_usd
+    - hedge_equity_usd
+    - hedge_pnl_realized_usd
+    - hedge_fees_paid_usd
+    - wallet_eth_bal, wallet_usdc_bal (Optional, for full NAV)
+    """
+    try:
+        initialize_kpi_csv()
+
+        # Convert all inputs to Decimal for precision
+        price = Decimal(str(snapshot_data.get('current_eth_price', 0)))
+
+        # 1. Benchmark (HODL)
+        init_eth = Decimal(str(snapshot_data.get('initial_eth', 0)))
+        init_usdc = Decimal(str(snapshot_data.get('initial_usdc', 0)))
+        init_hedge = Decimal(str(snapshot_data.get('initial_hedge_usdc', 0)))
+        benchmark_val = calculate_hodl_benchmark(init_eth, init_usdc, init_hedge, price)
+
+        # 2. Strategy NAV (Net Asset Value)
+        # NAV is defined here as the total current liquidation value of the
+        # strategy components: Uniswap position + fees (claimed and unclaimed)
+        # + hedge account equity. Hedge equity (margin included) is used rather
+        # than realized/unrealized PnL because it represents redeemable cash.
+        uni_val = Decimal(str(snapshot_data.get('uniswap_pos_value_usd', 0)))
+        uni_fees_claimed = Decimal(str(snapshot_data.get('uniswap_fees_claimed_usd', 0)))
+        uni_fees_unclaimed = Decimal(str(snapshot_data.get('uniswap_fees_unclaimed_usd', 0)))
+        hedge_equity = Decimal(str(snapshot_data.get('hedge_equity_usd', 0)))
+        hedge_fees = Decimal(str(snapshot_data.get('hedge_fees_paid_usd', 0)))
+
+        current_nav = uni_val + uni_fees_unclaimed + uni_fees_claimed + hedge_equity
+
+        # Alpha: strategy value minus the HODL benchmark
+        alpha = current_nav - benchmark_val
+
+        # Coverage Ratio: Uniswap earnings vs. hedge costs (funding could be added later)
+        total_hedge_cost = abs(hedge_fees)
+        total_uni_earnings = uni_fees_claimed + uni_fees_unclaimed
+
+        if total_hedge_cost > 0:
+            coverage_ratio = total_uni_earnings / total_hedge_cost
+        else:
+            coverage_ratio = Decimal("999.0")  # Sentinel for "no hedge cost yet"
+
+        # Write the snapshot row
+        with open(KPI_FILE, 'a', newline='') as f:
+            writer = csv.writer(f)
+            writer.writerow([
+                int(time.time()),
+                time.strftime('%Y-%m-%d %H:%M:%S'),
+                f"{current_nav:.2f}",
+                f"{benchmark_val:.2f}",
+                f"{alpha:.2f}",
+                f"{uni_val:.2f}",
+                f"{uni_fees_claimed:.2f}",
+                f"{uni_fees_unclaimed:.2f}",
+                f"{hedge_equity:.2f}",
+                f"{snapshot_data.get('hedge_pnl_realized_usd', 0):.2f}",
+                f"{hedge_fees:.2f}",
+                f"{price:.2f}",
+                f"{coverage_ratio:.2f}"
+            ])
+
+        logger.info(f"šŸ“Š KPI Logged | NAV: ${current_nav:.2f} | Benchmark: ${benchmark_val:.2f} | Alpha: ${alpha:.2f}")
+
+    except Exception as e:
+        logger.error(f"Failed to log KPI: {e}")
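+
+
+if __name__ == "__main__":
+    # Smoke test with illustrative dummy values (not real balances): writes a
+    # single row to logs/kpi_history.csv so the schema can be inspected.
+    log_kpi_snapshot({
+        'initial_eth': 10.0,
+        'initial_usdc': 5000.0,
+        'initial_hedge_usdc': 10000.0,
+        'current_eth_price': 3000.0,
+        'uniswap_pos_value_usd': 29500.0,
+        'uniswap_fees_claimed_usd': 120.0,
+        'uniswap_fees_unclaimed_usd': 35.0,
+        'hedge_equity_usd': 15200.0,
+        'hedge_pnl_realized_usd': 180.0,
+        'hedge_fees_paid_usd': -45.0,
+    })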
diff --git a/todo/ANALYSIS_TEMPLATE.md b/todo/ANALYSIS_TEMPLATE.md
new file mode 100644
index 0000000..8a0e931
--- /dev/null
+++ b/todo/ANALYSIS_TEMPLATE.md
@@ -0,0 +1,70 @@
+# Analysis Request: [Insert Topic Here]
+
+**Status:** [Draft / Pending Analysis / Completed]
+**Date:** [YYYY-MM-DD]
+**Priority:** [Low / Medium / High]
+
+---
+
+## 1. User Description & Query
+*(User: Fill this section with your design ideas, questions, code snippets, or links to files/web resources. Be as specific as possible about the goal.)*
+
+### Context
+* **Goal:**
+* **Current Behavior:**
+* **Desired Behavior:**
+
+### References
+* **Files:** `[filename.py]`, `[path/to/module]`
+* **Links:** `[url]`
+
+### Specific Questions / Hypothesis
+1.
+2.
+
+---
+
+*(The sections below are to be filled by the AI Agent upon request)*
+
+## 2. Agent Summary
+*(AI: Summarize the user's request to ensure alignment on the objective.)*
+
+* **Objective:**
+* **Key Constraints:**
+* **Scope:**
+
+## 3. Main Analysis
+*(AI: Perform the deep dive here. Use codebase knowledge, logic, and simulations.)*
+
+### 3.1 Codebase Investigation
+* **Affected Components:**
+* **Data Flow Analysis:**
+* **Current Limitation/Bug:**
+
+### 3.2 Technical Options / Trade-offs
+| Option | Pros | Cons | Complexity |
+| :--- | :--- | :--- | :--- |
+| **A. [Strategy A]** | ... | ... | ... |
+| **B. [Strategy B]** | ... | ... | ... |
+
+### 3.3 Proposed Solution Design
+* **Architecture:**
+* **Logic Changes:**
+* **Edge Cases Considered:**
+
+## 4. Risk Assessment
+*(AI: What could go wrong? Performance, Security, or Stability impacts.)*
+
+* **Risk 1:** [Description] -> *Mitigation:* [Strategy]
+
+## 5. Conclusion
+*(AI: Final verdict and recommendation.)*
+
+* **Recommendation:**
+
+## 6. Implementation Plan
+*(AI: Step-by-step checklist to execute the recommendation.)*
+
+- [ ] Step 1:
+- [ ] Step 2:
+- [ ] Step 3:
diff --git a/todo/ATR_over_week.PNG b/todo/ATR_over_week.PNG
new file mode 100644
index 0000000..6387b5f
Binary files /dev/null and b/todo/ATR_over_week.PNG differ
diff --git a/todo/DYNAMIC_TIMEOUT_ANALYSIS.md b/todo/DYNAMIC_TIMEOUT_ANALYSIS.md
new file mode 100644
index 0000000..2db4c38
--- /dev/null
+++ b/todo/DYNAMIC_TIMEOUT_ANALYSIS.md
@@ -0,0 +1,43 @@
+# Analysis Request: Dynamic Timeout for Shadow Orders
+
+**Status:** Completed
+**Date:** 2025-12-20
+**Priority:** Medium
+
+---
+
+## 1. User Description & Query
+**Goal:** Optimize the Shadow Order simulation by making the "Time to Live" (timeout) dynamic based on market volatility.
+**Logic:**
+* **Low Volatility (Slow Market):** Orders sit on the book longer. Give the shadow order more time (e.g., 60s) to fill.
+* **High Volatility (Fast Market):** Price moves quickly. If the order doesn't fill instantly, the price has likely run away. The timeout should be short (e.g., 10s) to fail fast.
+
+## 2. Agent Summary
+* **Objective:** Implement a dynamic timeout calculation during shadow order creation.
+* **Formula:** Inverse relationship with volatility.
+
+## 3. Main Analysis
+
+### 3.1 Volatility Scaling
+We already calculate `vol_pct` (5-min rolling StdDev).
+* **Base Vol:** `0.05%` (0.0005).
+* **Base Timeout:** 30 seconds.
+
+**Formula:**
+`timeout = Base_Timeout * (Base_Vol / Current_Vol)`
+
+**Examples:**
+* **Low Vol (0.025%):** `30 * (0.05 / 0.025) = 60s` (Max cap).
+* **Normal Vol (0.05%):** `30 * (0.05 / 0.05) = 30s`.
+* **High Vol (0.15%):** `30 * (0.05 / 0.15) = 10s`.
+
+### 3.2 Constraints
+* **Min Timeout:** 10s (give at least some chance even in chaotic markets).
+* **Max Timeout:** 60s (don't track stale orders forever).
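+
+A minimal sketch of the Step 1 helper from the plan below (the constants mirror this note; treat the exact names as assumptions until implemented):
+
+```python
+BASE_TIMEOUT_S = 30.0
+BASE_VOL = 0.0005      # 0.05% reference volatility
+MIN_TIMEOUT_S = 10.0
+MAX_TIMEOUT_S = 60.0
+
+def get_dynamic_timeout(vol_pct: float) -> float:
+    """Timeout inversely proportional to volatility, clamped to [10s, 60s]."""
+    if vol_pct <= 0:
+        return MAX_TIMEOUT_S  # no measurable volatility: give maximum time
+    timeout = BASE_TIMEOUT_S * (BASE_VOL / vol_pct)
+    return max(MIN_TIMEOUT_S, min(MAX_TIMEOUT_S, timeout))
+```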
+
+## 4. Conclusion
+**Recommendation:** Implement this dynamic timeout alongside the shadow order logic. It makes the "Success/Fail" metric much more realistic for a Maker strategy across different regimes.
+
+## 5. Implementation Plan
+- [ ] **Step 1:** Add a `get_dynamic_timeout(vol_pct)` helper method.
+- [ ] **Step 2:** Use this timeout when creating the shadow order entry.
diff --git a/todo/EXTENDED_SHADOW_TIMEOUT.md b/todo/EXTENDED_SHADOW_TIMEOUT.md
new file mode 100644
index 0000000..54db25a
--- /dev/null
+++ b/todo/EXTENDED_SHADOW_TIMEOUT.md
@@ -0,0 +1,44 @@
+# Analysis Request: Extended Timeout for Shadow Orders
+
+**Status:** Completed
+**Date:** 2025-12-20
+**Priority:** Medium
+
+---
+
+## 1. User Description & Query
+**Goal:** Extend the Shadow Order timeout to **10 minutes (600s)** to capture the "Mean Time to Execution" across different market conditions.
+**Reasoning:** Instead of failing fast (which assumes Maker is bad if not instant), we want to gather data on *how long* it actually takes to fill. This helps optimize the timeout later.
+
+## 2. Agent Summary
+* **Objective:** Modify `clp_hedger.py` to use a long fixed timeout (or a much longer dynamic timeout) for shadow orders.
+* **Risk:** "Success" at 9 minutes is effectively a "Failure" for hedging (Delta Drift).
+* **Mitigation:** We are collecting *data*, not executing trades. A 9-minute fill log is a valuable data point (it tells us "Maker is impossible here").
+
+## 3. Main Analysis
+
+### 3.1 Data Value vs. Hedging Reality
+* **Hedging Reality:** If a hedge takes > 30s to fill, the price has likely moved significantly. The "Hedge" is no longer hedging the original risk.
+* **Data Value:** By waiting 10 minutes, we can generate a distribution curve:
+    * *50% fill in < 5s* (Great!)
+    * *30% fill in 5s-60s* (Okay for stable markets)
+    * *20% fill in > 60s* (Terrible)
+    * *If we used a 60s timeout, we would just see "20% Failed", losing the nuance.*
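+
+A sketch of how the logged fill times could later be condensed into such a distribution (assumes the durations have already been parsed out of the `filled in X.Xs` log lines; the values here are illustrative):
+
+```python
+import pandas as pd
+
+fill_times = pd.Series([2.1, 3.8, 4.4, 12.0, 31.5, 55.0, 140.0, 410.0])
+
+# Quantiles reveal the "optimal timeout": if the 95th percentile is 45s,
+# a 45s production timeout captures almost all achievable Maker fills.
+print(fill_times.quantile([0.5, 0.8, 0.95]))
+print(f"Fills under 60s: {(fill_times <= 60).mean():.0%}")
+```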
+
+### 3.2 Implementation Strategy
+Instead of a complex dynamic timeout for now, let's set a **Fixed Long Timeout (600s)** for the Shadow Simulator.
+* **Why?** We want to see the *actual* fill time for every order, not cut it off artificially.
+* **Logging:** The log `filled in X.Xs` becomes the primary metric.
+
+### 3.3 Memory Impact
+* Even at 1 trade per minute, a 10-minute window means at most 10 items in the list.
+* Memory usage is negligible (<1KB).
+
+## 4. Conclusion
+**Recommendation:** Switch the Shadow Order logic to a **Fixed 600s Timeout**.
+* This turns the simulator into a "Fill Time Data Collector".
+* We can analyze the logs later to find the "Optimal Timeout" (e.g., "95% of fills happen within 45s, so set the timeout to 45s").
+
+## 5. Implementation Plan
+- [ ] **Step 1:** In `clp_hedger.py`, replace the dynamic timeout calculation with `timeout = 600`.
+- [ ] **Step 2:** Update logging to ensure `fill_time` is prominent.
diff --git a/todo/KPI_IMPLEMENTATION_PLAN.md b/todo/KPI_IMPLEMENTATION_PLAN.md
new file mode 100644
index 0000000..633f117
--- /dev/null
+++ b/todo/KPI_IMPLEMENTATION_PLAN.md
@@ -0,0 +1,57 @@
+# KPI Implementation Proposal
+
+**Status:** Proposed
+**Date:** 2025-12-20
+
+## 1. Objective
+Implement a robust KPI tracking system to answer: "Is this strategy actually making money compared to HODLing?"
+
+## 2. Architecture
+We will introduce a lightweight **KPI Module** (`tools/kpi_tracker.py`) that is called periodically by `uniswap_manager.py`.
+
+### A. Data Sources
+1. **Uniswap V3:** Current Position Value + Unclaimed Fees (from `uniswap_manager.py`).
+2. **Hyperliquid:** Equity + Unrealized PnL (from `clp_hedger.py` / API).
+3. **Wallet:** ETH/USDC balances (from Web3).
+4. **History:** Initial Amounts (from `hedge_status.json`).
+
+### B. The Metrics (KPIs)
+
+#### 1. Net Asset Value (NAV)
+* `NAV = (Uniswap Pos Value) + (Hyperliquid Equity) + (Wallet ETH * Price) + (Wallet USDC)`
+* *Note:* Allows tracking total portfolio health.
+
+#### 2. Strategy vs. Benchmark (Alpha)
+* **Strategy Value:** `Current NAV`
+* **Benchmark Value (HODL):**
+    * Snapshot at start: `Initial ETH` + `Initial USDC`.
+    * Current Val: `(Initial ETH * Current Price) + Initial USDC`.
+* **Alpha:** `Strategy Value - Benchmark Value`.
+
+#### 3. Fee Coverage Ratio
+* `Ratio = (Uniswap Fees Earned) / (Hedge Cost)`
+* *Hedge Cost:* Fees paid on Hyperliquid + Funding Paid.
+
+## 3. Implementation Plan
+
+### Step 1: Create `tools/kpi_tracker.py`
+This module will handle the math and logging.
+* **Functions:**
+    * `log_kpi_snapshot(nav_data, market_data)`: Appends to CSV.
+    * `calculate_benchmark(initial_snapshot, current_price)`: Returns the HODL value.
+
+### Step 2: CSV Schema (`logs/kpi_history.csv`)
+| Timestamp | NAV | Benchmark_NAV | Alpha | Uniswap_Fees_Acc | Hedge_Cost_Acc | Fee_Coverage | ETH_Price |
+| :--- | :--- | :--- | :--- | :--- | :--- | :--- | :--- |
+| ... | ... | ... | ... | ... | ... | ... | ... |
+
+### Step 3: Integration
+* **Hook into `uniswap_manager.py`:**
+    * Every loop (or every hour), gather data.
+    * Call `kpi_tracker.log_kpi_snapshot()`.
+* **Note:** `uniswap_manager.py` manages the slow loop, so it's the perfect place to record "macro" performance without adding latency to the hedger.
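+
+Once rows accumulate, the history can be inspected with a few lines of pandas. A sketch (column names follow the header actually written by `tools/kpi_tracker.py`, which extends the schema above):
+
+```python
+import pandas as pd
+
+df = pd.read_csv("logs/kpi_history.csv")
+
+# Alpha trend: is the strategy beating a plain HODL of the initial assets?
+print(df[["Date", "NAV_Total_USD", "Benchmark_HODL_USD", "Alpha_USD"]].tail())
+
+# Coverage must stay above 1.0, or hedging costs are eating the LP fees
+print("Mean fee coverage:", df["Fee_Coverage_Ratio"].astype(float).mean())
+```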
+
+## 4. Next Steps
+1. Approve this plan?
+2. I will generate `tools/kpi_tracker.py`.
+3. I will integrate it into `uniswap_manager.py`.
diff --git a/todo/MAKER_VS_TAKER_ANALYSIS.md b/todo/MAKER_VS_TAKER_ANALYSIS.md
new file mode 100644
index 0000000..be7e157
--- /dev/null
+++ b/todo/MAKER_VS_TAKER_ANALYSIS.md
@@ -0,0 +1,79 @@
+# Analysis Request: Maker vs Taker for Hedging Optimization
+
+**Status:** Completed
+**Date:** 2025-12-20
+**Priority:** Medium
+
+---
+
+## 1. User Description & Query
+**Goal:** Determine if using **Maker (Alo)** orders instead of **Taker (Ioc)** orders for "safe" rebalancing (in the middle of the range) would improve PnL.
+**Hypothesis:** Maker orders earn rebates (or pay lower fees) and capture the spread, but risk non-execution (delta drift). Taker orders pay fees and cross the spread but guarantee immediate hedging.
+
+### Specific Questions
+1. What data is needed to evaluate this trade-off?
+2. How do we measure the "Cost of Waiting" (Delta Drift)?
+
+---
+
+## 2. Agent Summary
+* **Objective:** Define the data requirements to backtest/simulate a "Maker Hedging Strategy."
+* **Current State:** `clp_hedger.py` predominantly uses Taker orders (`Ioc`) for rebalancing to ensure the hedge matches the Uniswap delta instantly. Maker orders (`Alo`) are only used for passive closing.
+
+## 3. Main Analysis
+
+### 3.1 The Trade-off Equation
+To know if Maker is better, we must solve:
+
+$$ \text{Gain} > \text{Loss} $$
+$$ (\text{Spread Capture} + \text{Fee Rebate}) > (\text{Delta Drift Cost} + \text{Opportunity Cost}) $$
+
+* **Spread Capture:** Selling at the Ask vs. selling at the Bid.
+* **Fee Rebate:** Hyperliquid pays rebates for Maker orders (e.g., 0.02%) vs. charging Taker fees (e.g., 0.035%). Total swing ~0.055%.
+* **Delta Drift:** While your Maker order sits on the book, the price moves. Your Uniswap LP delta changes, but your hedge delta doesn't. You are "under-hedged" or "over-hedged" for those seconds.
+
+### 3.2 Required Data for Simulation
+
+To simulate this, we cannot just look at OHLC candles. We need **Tick-Level Order Book Data**.
+
+#### A. Order Book Snapshots (The "Opportunity")
+* **Metric:** **Bid-Ask Spread Width** over time.
+    * *Why:* If the spread is 0.01% (tight), Maker offers little gain. If it's 0.1% (wide), capturing it is huge.
+    * *Data:* `timestamp`, `best_bid`, `best_ask`.
+
+#### B. Trade Flow / Fill Probability (The "Risk")
+* **Metric:** **Time to Fill (at Best Bid/Ask)**.
+    * *Why:* If you place a Maker buy at the Best Bid, how long until someone sells into you? 1 second? 1 minute?
+    * *Data:* You need a recording of **all public trades** on Hyperliquid to match against your theoretical order price.
+
+#### C. Price Velocity (The "Drift")
+* **Metric:** **Price Change per Second** during the "Wait Time".
+    * *Why:* If you wait 5 seconds for a fill and ETH moves 0.2%, you just lost 0.2% in unhedged exposure to save 0.05% in fees. Bad trade.
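+
+A toy instance of the trade-off equation from §3.1, using the illustrative numbers above (assumed values, not measurements):
+
+```python
+# All values in percent of notional
+SPREAD_CAPTURE = 0.02   # half-spread earned by resting at the passive price
+FEE_SWING = 0.055       # maker rebate + avoided taker fee, per the estimate above
+
+def maker_edge(drift_pct: float) -> float:
+    """Positive: Maker wins. Negative: Taker was the right call."""
+    return (SPREAD_CAPTURE + FEE_SWING) - drift_pct
+
+print(maker_edge(0.01))   # calm market: +0.065 -> Maker is worth it
+print(maker_edge(0.20))   # fast market: -0.125 -> drift dwarfs the savings
+```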
+
+### 3.3 Implemented Solution: Shadow Order Simulator
+The system now runs a real-time simulation engine to verify Maker feasibility without risking capital.
+
+#### Mechanism:
+1. **Creation:** Every successful Taker trade (`Ioc`) triggers the creation of a "Shadow Order" at the passive price (Bid for Buy, Ask for Sell).
+2. **Dynamic Timeout:** The "Time to Live" for the simulation is inversely proportional to volatility (`calculate_volatility()`):
+    * **Low Vol (Quiet):** Wait up to **60s**.
+    * **High Vol (Fast):** Timeout after **10s**.
+    * *Rationale:* In fast markets, if a fill isn't immediate, the price has likely moved too far, making a Maker strategy dangerous.
+3. **Verification:** The main loop checks these shadow orders every tick against the current `Ask` (for Buy) or `Bid` (for Sell) prices to confirm whether a fill *definitely* would have occurred.
+
+#### Data Captured:
+* `[SHADOW] SUCCESS`: The Maker order would have filled (capturing spread + rebate).
+* `[SHADOW] FAILED`: The price ran away (Taker was the correct choice to prevent delta drift).
+
+## 4. Risk Assessment
+* **Risk:** **Adverse Selection.** Market makers (bots) are faster than you. They will only fill your order when the market is moving *against* you (e.g., you are buying, price is crashing).
+    * *Mitigation:* Analyze "Time to Success" logs. If successes only happen at the very end of the 60s window, the "Drift Cost" might exceed the "Spread Gain."
+
+## 5. Conclusion
+**Recommendation:**
+1. **Monitor Logs:** Let the simulator run for 48-72 hours across different market regimes (Weekend vs. Weekday).
+2. **Decision Metric:** If Success Rate > 80% and Avg Fill Time < 15s, proceed to implement a "Passive First" hedging mode for the safe center of the CLP range.
+
+## 6. Implementation Plan
+- [x] **Step 1:** Update `clp_hedger.py` to fetch and log `Bid` and `Ask` explicitly during execution.
+- [x] **Step 2:** Implement `check_shadow_orders` and state management in `ScalperHedger`.
+- [x] **Step 3:** Implement dynamic timeout logic based on rolling StdDev.
diff --git a/todo/SHADOW_ORDER_ANALYSIS.md b/todo/SHADOW_ORDER_ANALYSIS.md
new file mode 100644
index 0000000..ae0e974
--- /dev/null
+++ b/todo/SHADOW_ORDER_ANALYSIS.md
@@ -0,0 +1,68 @@
+# Analysis Request: Implement Shadow Order Verification
+
+**Status:** Completed
+**Date:** 2025-12-20
+**Priority:** Medium
+
+---
+
+## 1. User Description & Query
+**Goal:** Verify whether a Maker order (at the opposite side of the book) *would* have been filled if we had used it instead of a Taker order.
+**Mechanism:**
+1. When a Taker trade executes, record a "Shadow Order" at the *passive* price (e.g., if Taker Buying at Ask, Shadow Buy at Bid).
+2. Check the market for the next 15 seconds.
+3. If the price crosses the Shadow Price, log "Success". If 15s passes without a fill, log "Failed".
+
+## 2. Agent Summary
+* **Objective:** Implement a lightweight "Shadow Order Simulator" inside `clp_hedger.py`.
+* **Key Logic:**
+    * `Shadow Buy` at $3000 (Current Bid). *Fills if Future Ask <= $3000.*
+    * `Shadow Sell` at $3001 (Current Ask). *Fills if Future Bid >= $3001.*
+    * **Wait Time:** 15 seconds max.
+
+## 3. Main Analysis
+
+### 3.1 Logic Changes in `clp_hedger.py`
+
+#### A. Data Structure
+Add a `self.shadow_orders` list to `ScalperHedger`.
+```python
+self.shadow_orders = [
+    {
+        'id': 'shadow_123',
+        'side': 'BUY',
+        'price': 3000.0,
+        'created_at': 1700000000,
+        'expires_at': 1700000015
+    }
+]
+```
+
+#### B. Creation Trigger
+Inside the `if oid:` block (successful Taker trade):
+1. Determine the **Passive Price**:
+    * If Taker BUY -> Passive Price = `levels['bid']` (Best Bid).
+    * If Taker SELL -> Passive Price = `levels['ask']` (Best Ask).
+2. Append to `self.shadow_orders`.
+
+#### C. Verification Loop
+Inside the main `while True` loop (see the sketch after this list):
+1. Iterate through `self.shadow_orders`.
+2. **Check Fill:**
+    * Shadow BUY fills if `current_ask <= shadow_price`. (Someone sold into our bid.)
+    * Shadow SELL fills if `current_bid >= shadow_price`. (Someone bought our ask.)
+3. **Check Expiry:** If `now > expires_at`, mark as FAILED (the price ran away).
+4. **Log & Remove:** Log the result (`[SHADOW] SUCCESS/FAIL`) and remove it from the list.
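+
+A compact sketch of that loop as a standalone function (in `clp_hedger.py` it would be a `ScalperHedger` method using the logger; the `levels` dict shape is an assumption):
+
+```python
+import time
+
+def check_shadow_orders(shadow_orders: list, levels: dict) -> None:
+    """Resolve simulated Maker orders against the current top of book."""
+    now = time.time()
+    for order in shadow_orders[:]:  # iterate over a copy so we can remove
+        filled = (
+            (order['side'] == 'BUY' and levels['ask'] <= order['price']) or
+            (order['side'] == 'SELL' and levels['bid'] >= order['price'])
+        )
+        if filled:
+            print(f"[SHADOW] SUCCESS {order['id']} filled in {now - order['created_at']:.1f}s")
+            shadow_orders.remove(order)
+        elif now > order['expires_at']:
+            print(f"[SHADOW] FAILED {order['id']} expired (price ran away)")
+            shadow_orders.remove(order)
+```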
+ * *Events:* `Swap` (Price changes), `Mint`, `Burn`. + +#### B. System State Data (The "Bot's Brain") +* *Why:* To understand *why* the bot made a decision. A trade might look bad in hindsight, but was correct given the data available at that millisecond. +* *Fields:* `timestamp`, `current_hedge_delta`, `target_hedge_delta`, `rebalance_threshold_used`, `volatility_metric`, `pnl_unrealized`, `pnl_realized`. + +#### C. External "Alpha" Data (Optimization Signals) +* **Funding Rates (Historical):** To optimize long/short bias. +* **Gas Prices (Arbitrum):** To optimize mint/burn timing (don't rebalance CLP if gas > expected fees). +* **Implied Volatility (Deribit/Derebit Options):** Compare realized vol vs. implied vol to adjust `DYNAMIC_THRESHOLD_MULTIPLIER`. + +### 3.2 Technical Options / Trade-offs + +| Option | Pros | Cons | Complexity | +| :--- | :--- | :--- | :--- | +| **A. CSV Files (Flat)** | Simple, human-readable, portable. Good for daily logs. | Slow to query large datasets. Hard to merge multiple streams (e.g., matching Uniswap swap to HL trade). | Low | +| **B. SQLite (Local DB)** | Single file, supports SQL queries, better performance than CSV. | Concurrency limits (one writer). Not great for massive tick data (TB scale). | Low-Medium | +| **C. Time-Series DB (InfluxDB / QuestDB)** | Optimized for high-frequency timestamps. Native downsampling. | Requires running a server/container. Overkill for simple analysis? | High | +| **D. Parquet / HDF5** | Extremely fast read/write for Python (Pandas). High compression. | Not human-readable. Best for "Cold" storage (backtesting). | Medium | + +### 3.3 Proposed Solution Design + +#### Architecture: "Hot" Logging + "Cold" Archival +1. **Live Logging (Hot):** Continue using `JSON` status files and `Log` files for immediate state. +2. **Data Collector Script:** A separate process (or async thread) that dumps high-frequency data into **daily CSVs** or **Parquet** files. +3. **Backtest Engine:** A Python script that loads these Parquet files to simulate "What if threshold was 0.08 instead of 0.05?". + +#### Data Sources +* **Hyperliquid:** Public API (Info) provides L2 snapshots and recent trade history. +* **Uniswap:** The Graph (Subgraphs) or RPC `eth_getLogs`. +* **Dune Analytics:** Great for exporting historical Uniswap V3 data (fees, volumes) to CSV for free/cheap. + +### 3.4 KPI & Performance Metrics +To truly evaluate "Success," we need more than just PnL. We need to compare against benchmarks. + +1. **NAV vs. Benchmark (HODL):** + * *Metric:* `(Current Wallet Value + Position Value) - (Net Inflows)` vs. `(Initial ETH * Current Price)`. + * *Goal:* Did we beat simply holding ETH? + * *Frequency:* Hourly. + +2. **Hedging Efficiency (Delta Neutrality):** + * *Metric:* `Net Delta Exposure = (Uniswap Delta + Hyperliquid Delta)`. + * *Goal:* Should be close to 0. A high standard deviation here means the bot is "loose" or slow. + * *Frequency:* Per-Tick (or aggregated per minute). + +3. **Cost of Hedge (The "Insurance Premium"):** + * *Metric:* `(Hedge Fees Paid + Funding Paid + Hedge Slippage) / Total Portfolio Value`. + * *Goal:* Keep this below the APR earned from Uniswap fees. + * *Frequency:* Daily. + +4. **Fee Coverage Ratio:** + * *Metric:* `Uniswap Fees Earned / Cost of Hedge`. + * *Goal:* Must be > 1.0. If < 1.0, the strategy is burning money to stay neutral. + * *Frequency:* Daily. + +5. **Impermanent Loss (IL) Realized:** + * *Metric:* Value lost due to selling ETH low/buying high during CLP rebalances vs. Fees Earned. 
+ * *Frequency:* Per-Rebalance. + +## 4. Risk Assessment +* **Risk:** **Data Gaps.** If the bot goes offline, you miss market data. + * *Mitigation:* Use public historical APIs (like Hyperliquid's archive or Dune) to fill gaps, rather than relying solely on local recording. +* **Risk:** **Storage Bloat.** Storing every millisecond tick can fill a hard drive in weeks. + * *Mitigation:* Aggregate. Store "1-second OHLC" + "Tick Volume" instead of every raw trade, unless debugging specific slippage events. + +## 5. Conclusion +**Recommendation:** +1. **Immediate:** Start logging **Internal System State** (Thresholds, Volatility metrics) to a structured CSV (`hedge_metrics.csv`). You can't get this from public APIs later. +2. **External Data:** Don't build a complex scraper yet. Rely on downloading public data (Dune/Hyperliquid) when you are ready to backtest. +3. **Format:** Use **Parquet** (via Pandas) for storing price data. It's 10x faster and smaller than CSV. + +## 6. Implementation Plan +- [ ] **Step 1:** Create `tools/data_collector.py` to fetch and save public trade history (HL) daily. +- [ ] **Step 2:** Modify `clp_hedger.py` to append "Decision Metrics" (Vol, Threshold, Delta) to a `metrics.csv` every loop. +- [ ] **Step 3:** Use a notebook (Colab/Jupyter) to load `metrics.csv` and visualize "Threshold vs. Price Deviation". \ No newline at end of file diff --git a/todo/template._for_analisismd b/todo/template._for_analisismd new file mode 100644 index 0000000..8a0e931 --- /dev/null +++ b/todo/template._for_analisismd @@ -0,0 +1,70 @@ +# Analysis Request: [Insert Topic Here] + +**Status:** [Draft / Pending Analysis / Completed] +**Date:** [YYYY-MM-DD] +**Priority:** [Low / Medium / High] + +--- + +## 1. User Description & Query +*(User: Fill this section with your design ideas, questions, code snippets, or links to files/web resources. Be as specific as possible about the goal.)* + +### Context +* **Goal:** +* **Current Behavior:** +* **Desired Behavior:** + +### References +* **Files:** `[filename.py]`, `[path/to/module]` +* **Links:** `[url]` + +### Specific Questions / Hypothesis +1. +2. + +--- + +*(The sections below are to be filled by the AI Agent upon request)* + +## 2. Agent Summary +*(AI: Summarize the user's request to ensure alignment on the objective.)* + +* **Objective:** +* **Key Constraints:** +* **Scope:** + +## 3. Main Analysis +*(AI: Perform the deep dive here. Use codebase knowledge, logic, and simulations.)* + +### 3.1 Codebase Investigation +* **Affected Components:** +* **Data Flow Analysis:** +* **Current Limitation/Bug:** + +### 3.2 Technical Options / Trade-offs +| Option | Pros | Cons | Complexity | +| :--- | :--- | :--- | :--- | +| **A. [Strategy A]** | ... | ... | ... | +| **B. [Strategy B]** | ... | ... | ... | + +### 3.3 Proposed Solution Design +* **Architecture:** +* **Logic Changes:** +* **Edge Cases Considered:** + +## 4. Risk Assessment +*(AI: What could go wrong? Performance, Security, or Stability impacts.)* + +* **Risk 1:** [Description] -> *Mitigation:* [Strategy] + +## 5. Conclusion +*(AI: Final verdict and recommendation.)* + +* **Recommendation:** + +## 6. Implementation Plan +*(AI: Step-by-step checklist to execute the recommendation.)* + +- [ ] Step 1: +- [ ] Step 2: +- [ ] Step 3: