Compare commits
35 Commits
74902034d4
...
e8d7db2743
| SHA1 |
|---|
| e8d7db2743 |
| bdd2d607cd |
| ac843b0f82 |
| b8ad857ca4 |
| bf88c16383 |
| 3000a366be |
| f8afdc1ab1 |
| fd7b208fff |
| 1165060bc0 |
| 0210bc93bc |
| 596fcde0bf |
| 5f9109c3a9 |
| d650bb5fe2 |
| 93363750ae |
| 541a71d2a6 |
| 76a858a7df |
| fe5cc8e1d1 |
| 5805601218 |
| afbb4e4976 |
| 75c0cc77cc |
| 5a05f0d190 |
| cac4405866 |
| 2eef7dbc17 |
| 70f3d48336 |
| 64f7866083 |
| 6812c481e5 |
| 2b55851136 |
| de9e61d4cf |
| ebd22b6863 |
| 603a506c4e |
| 25df8b8ba9 |
| 0d53200882 |
| bbfb549fbb |
| 323a3f31de |
| ac8ac31d01 |
24
.env.example
Normal file
@ -0,0 +1,24 @@
# Example environment variables for the Hyperliquid trading toolkit
# Copy this file to .env and fill in real values. Do NOT commit your real .env file.

# Main wallet (used only to authorize agents on-chain)
# Example: MAIN_WALLET_PRIVATE_KEY=0x...
MAIN_WALLET_PRIVATE_KEY=
MAIN_WALLET_ADDRESS=

# Agent keys (private keys authorized via create_agent.py)
# Preferred patterns:
# - AGENT_PRIVATE_KEY: default agent
# - <NAME>_AGENT_PK or <NAME>_AGENT_PRIVATE_KEY: per-agent keys (e.g., SCALPER_AGENT_PK)
# Example: AGENT_PRIVATE_KEY=0x...
AGENT_PRIVATE_KEY=
# Example per-agent key:
# SCALPER_AGENT_PK=
# SWING_AGENT_PK=

# Optional: CoinGecko API key to reduce rate limits for market cap fetches
COINGECKO_API_KEY=

# Optional: Set a custom environment for development/testing
# E.g., DEBUG=true
DEBUG=
41
.gitignore
vendored
Normal file
@ -0,0 +1,41 @@
# --- Secrets & Environment ---
# Ignore local environment variables
.env
# Ignore virtual environment folders
.venv/
venv/

# --- Python ---
# Ignore cache files
__pycache__/
*.py[cod]

# --- Data & Logs ---
# Ignore all database files (db, write-ahead log, shared memory)
_data/*.db
_data/*.db-shm
_data/*.db-wal

# Ignore all JSON files in the data folder
_data/*.json

# Ignore all log files
_logs/

# --- SDK ---
# Ignore all contents of the sdk directory
sdk/

# --- Other ---
# Ignore custom agents directory
agents/

# Ignore Jekyll files
.nojekyll

# --- Editor & OS Files ---
# Ignore VSCode, JetBrains, and macOS/Windows system files
.vscode/
.idea/
.DS_Store
Thumbs.db
127
GEMINI.md
Normal file
@ -0,0 +1,127 @@
|
||||
# Project Overview
|
||||
|
||||
This project is a sophisticated, multi-process automated trading bot for the Hyperliquid decentralized exchange. It is written in Python and uses a modular architecture to separate concerns like data fetching, strategy execution, and trade management.
|
||||
|
||||
The bot uses a high-performance data pipeline with SQLite for storing market data. Trading strategies are defined and configured in a JSON file, allowing for easy adjustments without code changes. The system supports multiple, independent trading agents for risk segregation and PNL tracking. A live terminal dashboard provides real-time monitoring of market data, strategy signals, and the status of all background processes.
|
||||
|
||||
## Building and Running
|
||||
|
||||
### 1. Setup
|
||||
|
||||
1. **Create and activate a virtual environment:**
|
||||
```bash
|
||||
# For Windows
|
||||
python -m venv .venv
|
||||
.\.venv\Scripts\activate
|
||||
|
||||
# For macOS/Linux
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
```
|
||||
|
||||
2. **Install dependencies:**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
|
||||
3. **Configure environment variables:**
|
||||
Create a `.env` file in the root of the project (you can copy `.env.example`) and add your Hyperliquid wallet private key and any agent keys.
|
||||
|
||||
4. **Configure strategies:**
|
||||
Edit `_data/strategies.json` to enable and configure your desired trading strategies.
|
||||
|
||||
### 2. Running the Bot
|
||||
|
||||
To run the main application, which includes the dashboard and all background processes, execute the following command:
|
||||
|
||||
```bash
|
||||
python main_app.py
|
||||
```
|
||||
|
||||
## Development Conventions
|
||||
|
||||
* **Modularity:** The project is divided into several scripts, each with a specific responsibility (e.g., `data_fetcher.py`, `trade_executor.py`).
|
||||
* **Configuration-driven:** Strategies are defined in `_data/strategies.json`, not hardcoded. This allows for easy management of strategies.
|
||||
* **Multi-processing:** The application uses the `multiprocessing` module to run different components in parallel for performance and stability.
|
||||
* **Strategies:** Custom strategies should inherit from the `BaseStrategy` class (defined in `strategies/base_strategy.py`) and implement the `calculate_signals` method (see the sketch after this list).
|
||||
* **Documentation:** The `WIKI/` directory contains detailed documentation for the project. Start with `WIKI/SUMMARY.md`.
|
||||
|
||||
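To make the strategy convention concrete, here is a minimal, hypothetical sketch. It assumes `BaseStrategy` exposes a `parameters` dict and that `calculate_signals` receives a candle DataFrame; the actual interface in `strategies/base_strategy.py` may differ.

```python
# Illustrative only: constructor, attribute names, and the calculate_signals
# signature are assumptions, not the project's confirmed interface.
import pandas as pd
from strategies.base_strategy import BaseStrategy

class ExampleSmaCross(BaseStrategy):
    """Toy SMA-crossover strategy used only to show the inheritance pattern."""

    def calculate_signals(self, candles: pd.DataFrame) -> str:
        fast = candles["close"].rolling(self.parameters.get("fast", 5)).mean()
        slow = candles["close"].rolling(self.parameters.get("slow", 20)).mean()
        if fast.iloc[-1] > slow.iloc[-1]:
            return "LONG"
        if fast.iloc[-1] < slow.iloc[-1]:
            return "SHORT"
        return "WAIT"
```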
# Project Review and Recommendations
|
||||
|
||||
This review provides an analysis of the current state of the automated trading bot project, proposes specific code improvements, and identifies files that appear to be unused or are one-off utilities that could be reorganized.
|
||||
|
||||
The project is a well-structured, multi-process Python application for crypto trading. It has a clear separation of concerns between data fetching, strategy execution, and trade management. The use of `multiprocessing` and a centralized `main_app.py` orchestrator is a solid architectural choice.
|
||||
|
||||
The following sections detail recommendations for improving configuration management, code structure, and robustness, along with a list of files recommended for cleanup.
|
||||
|
||||
---
|
||||
|
||||
## Proposed Code Changes
|
||||
|
||||
### 1. Centralize Configuration
|
||||
|
||||
- **Issue:** Key configuration variables like `WATCHED_COINS` and `required_timeframes` are hardcoded in `main_app.py`. This makes them difficult to change without modifying the source code.
|
||||
- **Proposal:**
|
||||
- Create a central configuration file, e.g., `_data/config.json`.
|
||||
- Move `WATCHED_COINS` and `required_timeframes` into this new file.
|
||||
- Load this configuration in `main_app.py` at startup (see the sketch below).
|
||||
- **Benefit:** Decouples configuration from code, making the application more flexible and easier to manage.
|
||||
|
||||
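A minimal sketch of what Proposal 1 could look like at startup. The file `_data/config.json` and the key names `watched_coins` / `required_timeframes` are proposed here, not existing parts of the project; the fallback defaults are illustrative.

```python
# Proposal 1 sketch: load watched coins and timeframes from a central config
# file instead of hardcoding them in main_app.py.
import json
from pathlib import Path

CONFIG_PATH = Path("_data/config.json")  # proposed file, does not exist yet

def load_config() -> dict:
    with CONFIG_PATH.open() as f:
        return json.load(f)

config = load_config()
WATCHED_COINS = config.get("watched_coins", ["BTC", "ETH"])
required_timeframes = config.get("required_timeframes", ["1m", "5m", "1h"])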
### 2. Refactor `main_app.py` for Clarity
|
||||
|
||||
- **Issue:** `main_app.py` is long and handles multiple responsibilities: process orchestration, dashboard rendering, and data reading.
|
||||
- **Proposal:**
|
||||
- **Abstract Process Management:** The functions for running subprocesses (e.g., `run_live_candle_fetcher`, `run_resampler_job`) contain repetitive logic for logging, shutdown handling, and process looping. This could be abstracted into a generic `ProcessRunner` class (sketched below).
|
||||
- **Create a Dashboard Class:** The complex dashboard rendering logic could be moved into a separate `Dashboard` class to improve separation of concerns and make the main application loop cleaner.
|
||||
- **Benefit:** Improves code readability, reduces duplication, and makes the application easier to maintain and extend.
|
||||
|
||||
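A rough sketch of the proposed `ProcessRunner` abstraction, assuming the existing `run_*` functions can be passed as plain callables; restart policy and logging hooks are intentionally left out.

```python
# Proposal 2 sketch: wrap a worker callable in a managed child process.
import multiprocessing as mp

class ProcessRunner:
    """Starts, monitors, and stops one background worker process."""

    def __init__(self, name: str, target, *args):
        self.name = name
        self.target = target
        self.args = args
        self.process: mp.Process | None = None

    def start(self) -> None:
        self.process = mp.Process(target=self.target, args=self.args,
                                  name=self.name, daemon=True)
        self.process.start()

    def is_alive(self) -> bool:
        return self.process is not None and self.process.is_alive()

    def stop(self, timeout: float = 5.0) -> None:
        if self.process is not None:
            self.process.terminate()
            self.process.join(timeout)
```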
### 3. Improve Project Structure
|
||||
|
||||
- **Issue:** The root directory is cluttered with numerous Python scripts, making it difficult to distinguish between core application files, utility scripts, and old/example files.
|
||||
- **Proposal:**
|
||||
- Create a `scripts/` directory and move all one-off utility and maintenance scripts into it.
|
||||
- Consider creating a `src/` or `app/` directory to house the core application source code (`main_app.py`, `trade_executor.py`, etc.), separating it clearly from configuration, data, and documentation.
|
||||
- **Benefit:** A cleaner, more organized project structure that is easier for new developers to understand.
|
||||
|
||||
### 4. Enhance Robustness and Error Handling
|
||||
|
||||
- **Issue:** The agent loading in `trade_executor.py` relies on discovering environment variables by a naming convention (`_AGENT_PK`). This is clever but can be brittle if environment variables are named incorrectly.
|
||||
- **Proposal:**
|
||||
- Explicitly define the agent names and their corresponding environment variable keys in the proposed `_data/config.json` file. The `trade_executor` would then load only the agents specified in the configuration (see the sketch below).
|
||||
- **Benefit:** Makes agent configuration more explicit and less prone to errors from stray environment variables.
|
||||
|
||||
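Sketch of Proposal 4, assuming a hypothetical `"agents"` mapping in the proposed `_data/config.json` of the form `{"scalper": "SCALPER_AGENT_PK"}`; neither the file nor the key exists yet.

```python
# Proposal 4 sketch: load only explicitly declared agents, ignoring stray
# environment variables that merely match a naming convention.
import os

def load_agents(config: dict) -> dict[str, str]:
    """Return private keys only for agents declared in the configuration."""
    agents = {}
    for name, env_key in config.get("agents", {}).items():
        private_key = os.getenv(env_key)
        if private_key:
            agents[name] = private_key
    return agents
```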
---
|
||||
|
||||
## Identified Unused/Utility Files
|
||||
|
||||
The following files were identified as likely being unused by the core application, being obsolete, or serving as one-off utilities. It is recommended to **move them to a `scripts/` directory** or **delete them** if they are obsolete.
|
||||
|
||||
### Obsolete / Old Versions:
|
||||
- `data_fetcher_old.py`
|
||||
- `market_old.py`
|
||||
- `base_strategy.py` (The one in the root directory; the one in `strategies/` is used).
|
||||
|
||||
### One-Off Utility Scripts (Recommend moving to `scripts/`):
|
||||
- `!migrate_to_sqlite.py`
|
||||
- `import_csv.py`
|
||||
- `del_market_cap_tables.py`
|
||||
- `fix_timestamps.py`
|
||||
- `list_coins.py`
|
||||
- `create_agent.py`
|
||||
|
||||
### Examples / Unused Code:
|
||||
- `basic_ws.py` (Appears to be an example file).
|
||||
- `backtester.py`
|
||||
- `strategy_sma_cross.py` (A strategy file in the root, not in the `strategies` folder).
|
||||
- `strategy_template.py`
|
||||
|
||||
### Standalone / Potentially Unused Core Files:
|
||||
The following files seem to have their logic already integrated into the main multi-process application. They might be remnants of a previous architecture and may not be needed as standalone scripts.
|
||||
- `address_monitor.py`
|
||||
- `position_monitor.py`
|
||||
- `trade_log.py`
|
||||
- `wallet_data.py`
|
||||
- `whale_tracker.py`
|
||||
|
||||
### Data / Log Files (Recommend archiving or deleting):
|
||||
- `hyperliquid_wallet_data_*.json` (These appear to be backups or logs).
|
||||
300
IMPROVEMENT_ROADMAP.md
Normal file
@ -0,0 +1,300 @@
|
||||
# Improvement Roadmap - Hyperliquid Trading Bot
|
||||
|
||||
## Overview
|
||||
This document outlines the detailed implementation plan for transforming the trading bot into a production-ready system.
|
||||
|
||||
## Phase 1: Foundation (Weeks 1-2)
|
||||
|
||||
### Week 1: Security & Stability
|
||||
|
||||
#### Day 1-2: Critical Security Fixes
|
||||
- [ ] **Implement Encrypted Key Storage**
|
||||
- Create `security/key_manager.py`
|
||||
- Replace environment variable key access
|
||||
- Add key rotation mechanism
|
||||
- **Files**: `trade_executor.py`, `create_agent.py`
|
||||
|
||||
- [ ] **Add Input Validation Framework**
|
||||
- Create `validation/trading_validator.py`
|
||||
- Validate all trading parameters
|
||||
- Add sanitization for user inputs
|
||||
- **Files**: `position_manager.py`, `trade_executor.py`
|
||||
|
||||
#### Day 3-4: Risk Management
|
||||
- [ ] **Implement Circuit Breakers**
|
||||
- Create `risk/circuit_breaker.py`
|
||||
- Add trading halt conditions
|
||||
- Implement automatic recovery
|
||||
- **Files**: `trade_executor.py`, `position_manager.py`
|
||||
|
||||
- [ ] **Fix Import Resolution Issues**
|
||||
- Update relative imports
|
||||
- Add `__init__.py` files where missing
|
||||
- Test all module imports
|
||||
- **Files**: `main_app.py`, all strategy files
|
||||
|
||||
#### Day 5-7: Code Quality
|
||||
- [ ] **Refactor Dashboard Display**
|
||||
- Extract `DashboardRenderer` class
|
||||
- Split into market/strategy/position components
|
||||
- Add configuration for display options
|
||||
- **Files**: `main_app.py`
|
||||
|
||||
### Week 2: Configuration & Error Handling
|
||||
|
||||
#### Day 8-9: Configuration Management
|
||||
- [ ] **Create Centralized Configuration**
|
||||
- Create `config/settings.py`
|
||||
- Move all magic numbers to config
|
||||
- Add environment-specific configs
|
||||
- **Files**: All Python files
|
||||
|
||||
- [ ] **Standardize Error Handling**
|
||||
- Create `utils/error_handlers.py`
|
||||
- Implement retry decorators (see the sketch below)
|
||||
- Add structured exception classes
|
||||
- **Files**: All core modules
|
||||
|
||||
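As a sketch of the retry-decorator task above (the `utils/error_handlers.py` module is proposed, not existing; names and defaults are illustrative):

```python
# Retry a flaky call with exponential backoff before giving up.
import functools
import logging
import time

def retry(attempts: int = 3, delay: float = 1.0, backoff: float = 2.0,
          exceptions=(Exception,)):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            wait = delay
            for attempt in range(1, attempts + 1):
                try:
                    return func(*args, **kwargs)
                except exceptions as exc:
                    if attempt == attempts:
                        raise  # out of attempts: propagate the last error
                    logging.warning("%s failed (%s); retrying in %.1fs",
                                    func.__name__, exc, wait)
                    time.sleep(wait)
                    wait *= backoff
        return wrapper
    return decorator
```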
#### Day 10-12: Database Improvements
|
||||
- [ ] **Implement Connection Pool**
|
||||
- Create `database/connection_pool.py`
|
||||
- Replace direct SQLite connections
|
||||
- Add connection health monitoring
|
||||
- **Files**: `base_strategy.py`, all data access files
|
||||
|
||||
- [ ] **Add Database Migrations**
|
||||
- Create `database/migrations/`
|
||||
- Version control schema changes
|
||||
- Add rollback capabilities
|
||||
- **Files**: Database schema files
|
||||
|
||||
#### Day 13-14: Basic Testing
|
||||
- [ ] **Create Test Framework**
|
||||
- Set up `tests/` directory structure
|
||||
- Add pytest configuration
|
||||
- Create test fixtures and mocks
|
||||
- **Files**: New test files
|
||||
|
||||
## Phase 2: Performance & Testing (Weeks 3-4)
|
||||
|
||||
### Week 3: Performance Optimization
|
||||
|
||||
#### Day 15-17: Caching Layer
|
||||
- [ ] **Implement Redis/Memory Cache**
|
||||
- Create `cache/cache_manager.py`
|
||||
- Cache frequently accessed data
|
||||
- Add cache invalidation logic
|
||||
- **Files**: `data_fetcher.py`, `base_strategy.py`
|
||||
|
||||
#### Day 18-19: Async Operations
|
||||
- [ ] **Convert to Async/Await**
|
||||
- Identify blocking operations
|
||||
- Convert to async patterns
|
||||
- Add async context managers
|
||||
- **Files**: `live_market_utils.py`, API calls
|
||||
|
||||
#### Day 20-21: Batch Processing
|
||||
- [ ] **Implement Batch Operations**
|
||||
- Batch database writes
|
||||
- Bulk API requests
|
||||
- Optimize data processing
|
||||
- **Files**: Data processing modules
|
||||
|
||||
### Week 4: Testing Framework
|
||||
|
||||
#### Day 22-24: Unit Tests
|
||||
- [ ] **Comprehensive Unit Test Suite**
|
||||
- Test all core classes
|
||||
- Mock external dependencies
|
||||
- Achieve >80% coverage
|
||||
- **Files**: `tests/unit/`
|
||||
|
||||
#### Day 25-26: Integration Tests
|
||||
- [ ] **End-to-End Testing**
|
||||
- Test complete workflows
|
||||
- Mock Hyperliquid API
|
||||
- Test process communication
|
||||
- **Files**: `tests/integration/`
|
||||
|
||||
#### Day 27-28: Paper Trading
|
||||
- [ ] **Paper Trading Mode**
|
||||
- Create simulation environment
|
||||
- Mock trade execution
|
||||
- Add performance tracking
|
||||
- **Files**: `trade_executor.py`, new simulation files
|
||||
|
||||
## Phase 3: Monitoring & Observability (Weeks 5-6)
|
||||
|
||||
### Week 5: Metrics & Monitoring
|
||||
|
||||
#### Day 29-31: Metrics Collection
|
||||
- [ ] **Add Prometheus Metrics**
|
||||
- Create `monitoring/metrics.py`
|
||||
- Track key performance indicators
|
||||
- Add custom business metrics
|
||||
- **Files**: All core modules
|
||||
|
||||
#### Day 32-33: Health Checks
|
||||
- [ ] **Health Check System**
|
||||
- Create `monitoring/health_check.py`
|
||||
- Monitor all system components
|
||||
- Add dependency checks
|
||||
- **Files**: `main_app.py`, all processes
|
||||
|
||||
#### Day 34-35: Alerting
|
||||
- [ ] **Alerting System**
|
||||
- Create `monitoring/alerts.py`
|
||||
- Configure alert rules
|
||||
- Add notification channels
|
||||
- **Files**: New alerting files
|
||||
|
||||
### Week 6: Documentation & Developer Experience
|
||||
|
||||
#### Day 36-38: API Documentation
|
||||
- [ ] **Auto-Generated Docs**
|
||||
- Set up Sphinx/MkDocs
|
||||
- Document all public APIs
|
||||
- Add code examples
|
||||
- **Files**: `docs/` directory
|
||||
|
||||
#### Day 39-40: Setup Improvements
|
||||
- [ ] **Interactive Setup**
|
||||
- Create setup wizard
|
||||
- Validate configuration
|
||||
- Add guided configuration
|
||||
- **Files**: `setup.py`, new setup files
|
||||
|
||||
#### Day 41-42: Examples & Guides
|
||||
- [ ] **Strategy Examples**
|
||||
- Create example strategies
|
||||
- Add development tutorials
|
||||
- Document best practices
|
||||
- **Files**: `examples/`, `WIKI/`
|
||||
|
||||
## Phase 4: Advanced Features (Weeks 7-8)
|
||||
|
||||
### Week 7: Advanced Risk Management
|
||||
|
||||
#### Day 43-45: Position Sizing
|
||||
- [ ] **Dynamic Position Sizing**
|
||||
- Volatility-based sizing
|
||||
- Portfolio risk metrics
|
||||
- Kelly criterion implementation
|
||||
- **Files**: `position_manager.py`, new risk modules
|
||||
|
||||
#### Day 46-47: Advanced Orders
|
||||
- [ ] **Advanced Order Types**
|
||||
- Stop-loss orders
|
||||
- Take-profit orders
|
||||
- Conditional orders
|
||||
- **Files**: `trade_executor.py`
|
||||
|
||||
#### Day 48-49: Portfolio Management
|
||||
- [ ] **Portfolio Optimization**
|
||||
- Correlation analysis
|
||||
- Risk parity allocation
|
||||
- Rebalancing logic
|
||||
- **Files**: New portfolio modules
|
||||
|
||||
### Week 8: Production Readiness
|
||||
|
||||
#### Day 50-52: Deployment
|
||||
- [ ] **Production Deployment**
|
||||
- Docker containerization
|
||||
- Kubernetes manifests
|
||||
- CI/CD pipeline
|
||||
- **Files**: `docker/`, `.github/workflows/`
|
||||
|
||||
#### Day 53-54: Performance Profiling
|
||||
- [ ] **Profiling Tools**
|
||||
- Performance monitoring
|
||||
- Memory usage tracking
|
||||
- Bottleneck identification
|
||||
- **Files**: New profiling modules
|
||||
|
||||
#### Day 55-56: Final Polish
|
||||
- [ ] **Production Hardening**
|
||||
- Security audit
|
||||
- Load testing
|
||||
- Documentation review
|
||||
- **Files**: All files
|
||||
|
||||
## Implementation Guidelines
|
||||
|
||||
### Daily Workflow
|
||||
1. **Morning Standup**: Review progress, identify blockers
|
||||
2. **Development**: Focus on assigned tasks
|
||||
3. **Testing**: Write tests alongside code
|
||||
4. **Code Review**: Peer review all changes
|
||||
5. **Documentation**: Update docs with changes
|
||||
|
||||
### Quality Gates
|
||||
- All code must pass linting and formatting
|
||||
- New features require unit tests
|
||||
- Integration tests for critical paths
|
||||
- Security review for sensitive changes
|
||||
|
||||
### Risk Mitigation
|
||||
- Feature flags for new functionality
|
||||
- Gradual rollout with monitoring
|
||||
- Rollback procedures for each change
|
||||
- Regular backup and recovery testing
|
||||
|
||||
## Success Criteria
|
||||
|
||||
### Phase 1 Success
|
||||
- [ ] All security vulnerabilities fixed
|
||||
- [ ] Import resolution issues resolved
|
||||
- [ ] Basic test framework in place
|
||||
- [ ] Configuration management implemented
|
||||
|
||||
### Phase 2 Success
|
||||
- [ ] Performance improvements measured
|
||||
- [ ] Test coverage >80%
|
||||
- [ ] Paper trading mode functional
|
||||
- [ ] Async operations implemented
|
||||
|
||||
### Phase 3 Success
|
||||
- [ ] Monitoring dashboard operational
|
||||
- [ ] Alerting system functional
|
||||
- [ ] Documentation complete
|
||||
- [ ] Developer experience improved
|
||||
|
||||
### Phase 4 Success
|
||||
- [ ] Production deployment ready
|
||||
- [ ] Advanced features working
|
||||
- [ ] Performance benchmarks met
|
||||
- [ ] Security audit passed
|
||||
|
||||
## Resource Requirements
|
||||
|
||||
### Development Team
|
||||
- **Senior Python Developer**: Lead architecture and security
|
||||
- **Backend Developer**: Performance and database optimization
|
||||
- **DevOps Engineer**: Deployment and monitoring
|
||||
- **QA Engineer**: Testing framework and automation
|
||||
|
||||
### Tools & Services
|
||||
- **Development**: PyCharm/VSCode, Git, Docker
|
||||
- **Testing**: Pytest, Mock, Coverage tools
|
||||
- **Monitoring**: Prometheus, Grafana, AlertManager
|
||||
- **CI/CD**: GitHub Actions, Docker Hub
|
||||
- **Documentation**: Sphinx/MkDocs, ReadTheDocs
|
||||
|
||||
### Infrastructure
|
||||
- **Development**: Local development environment
|
||||
- **Testing**: Staging environment with test data
|
||||
- **Production**: Cloud deployment with monitoring
|
||||
- **Backup**: Automated backup and recovery system
|
||||
|
||||
## Timeline Summary
|
||||
|
||||
| Phase | Duration | Key Deliverables |
|
||||
|-------|----------|------------------|
|
||||
| Phase 1 | 2 weeks | Security fixes, basic testing, configuration |
|
||||
| Phase 2 | 2 weeks | Performance optimization, comprehensive testing |
|
||||
| Phase 3 | 2 weeks | Monitoring, documentation, developer tools |
|
||||
| Phase 4 | 2 weeks | Advanced features, production deployment |
|
||||
| **Total** | **8 weeks** | **Production-ready trading system** |
|
||||
|
||||
This roadmap provides a structured approach to transforming the trading bot into a robust, scalable, and maintainable system suitable for production use.
|
||||
1
PROJECT_REVIEW_AND_PROPOSALS.md
Normal file
@ -0,0 +1 @@
"# Comprehensive Project Review and Improvement Proposals"
88
README.md
Normal file
@ -0,0 +1,88 @@
|
||||
# Automated Crypto Trading Bot
|
||||
|
||||
This project is a sophisticated, multi-process automated trading bot designed to interact with the Hyperliquid decentralized exchange. It features a robust data pipeline, a flexible strategy engine, multi-agent trade execution, and a live terminal dashboard for real-time monitoring.
|
||||
|
||||
<!-- It's a good idea to take a screenshot of your dashboard and upload it to a service like Imgur to include here -->
|
||||
|
||||
## Features
|
||||
|
||||
* **Multi-Process Architecture**: Core components (data fetching, trading, strategies) run in parallel processes for maximum performance and stability.
|
||||
* **Comprehensive Data Pipeline**:
|
||||
* Live price feeds for all assets.
|
||||
* Historical candle data collection for any coin and timeframe.
|
||||
* Historical market cap data fetching from the CoinGecko API.
|
||||
* **High-Performance Database**: Uses SQLite with pandas for fast, indexed storage and retrieval of all market data.
|
||||
* **Configuration-Driven Strategies**: Trading strategies are defined and managed in a simple JSON file (`_data/strategies.json`), allowing for easy configuration without code changes.
|
||||
* **Multi-Agent Trading**: Supports multiple, independent trading agents for advanced risk segregation and PNL tracking.
|
||||
* **Live Terminal Dashboard**: A real-time, flicker-free dashboard to monitor live prices, market caps, strategy signals, and the status of all background processes.
|
||||
* **Secure Key Management**: Uses a `.env` file to securely manage all private keys and API keys, keeping them separate from the codebase.
|
||||
|
||||
## Project Structure
|
||||
|
||||
The project is composed of several key scripts that work together:
|
||||
|
||||
* **`main_app.py`**: The central orchestrator. It launches all background processes and displays the main monitoring dashboard.
|
||||
* **`trade_executor.py`**: The trading "brain." It reads signals from all active strategies and executes trades using the appropriate agent.
|
||||
* **`data_fetcher.py`**: A background service that collects 1-minute historical candle data and saves it to the SQLite database.
|
||||
* **`resampler.py`**: A background service that reads the 1-minute data and generates all other required timeframes (e.g., 5m, 1h, 1d).
|
||||
* **`market_cap_fetcher.py`**: A scheduled service to download daily market cap data.
|
||||
* **`strategy_*.py`**: Individual files containing the logic for different types of trading strategies (e.g., SMA Crossover).
|
||||
* **`_data/strategies.json`**: The configuration file for defining and enabling/disabling your trading strategies.
|
||||
* **`.env`**: The secure file for storing all your private keys and API keys.
|
||||
|
||||
## Installation
|
||||
|
||||
1. **Clone the Repository**
|
||||
```bash
|
||||
git clone https://github.com/your-username/your-repo-name.git
|
||||
cd your-repo-name
|
||||
```
|
||||
2. **Create and Activate a Virtual Environment**
|
||||
```bash
|
||||
# For Windows
|
||||
python -m venv .venv
|
||||
.\.venv\Scripts\activate
|
||||
# For macOS/Linux
|
||||
python3 -m venv .venv
|
||||
source .venv/bin/activate
|
||||
```
|
||||
3. **Install Dependencies**
|
||||
```bash
|
||||
pip install -r requirements.txt
|
||||
```
|
||||
## Getting Started: Configuration
|
||||
|
||||
Before running the application, you must configure your wallets, agents, and API keys.
|
||||
|
||||
1. **Create the `.env` File**
   In the root of the project, create a file named `.env` (you can copy `.env.example`) and replace the placeholder values with your actual keys.
|
||||
|
||||
2. **Activate Your Main Wallet on Hyperliquid**
|
||||
The `trade_executor.py` script will fail if your main wallet is not registered.
|
||||
* Go to the Hyperliquid website, connect your main wallet, and make a small deposit. This is a one-time setup step.
|
||||
3. **Create and Authorize Trading Agents**
|
||||
The `trade_executor.py` uses secure "agent" keys that can trade but cannot withdraw. You need to generate these and authorize them with your main wallet.
|
||||
* Run the `create_agent.py` script
|
||||
```bash
|
||||
python create_agent.py
|
||||
```
|
||||
The script will output a new agent private key. Copy this key and add it to your `.env` file (e.g., as `SCALPER_AGENT_PK`). Repeat this for each agent you want to create.
4. **Configure Your Strategies**
Open the `_data/strategies.json` file to define which strategies you want to run.
* Set `"enabled": true` to activate a strategy.
* Assign an `"agent"` (e.g., `"scalper"`, `"swing"`) to each strategy. The agent name must correspond to a key in your `.env` file (e.g., `SCALPER_AGENT_PK` -> `"scalper"`).
* Configure the parameters for each strategy, such as the coin, timeframe, and any indicator settings.
|
||||
|
||||
## Usage
|
||||
Once everything is configured, you can run the main application from your terminal:
|
||||
```bash
|
||||
python main_app.py
|
||||
```
|
||||
|
||||
## Documentation
|
||||
|
||||
Detailed project documentation is available in the `WIKI/` directory. Start with the summary page:
|
||||
|
||||
`WIKI/SUMMARY.md`
|
||||
|
||||
This contains links and explanations for `OVERVIEW.md`, `SETUP.md`, `SCRIPTS.md`, and other helpful pages that describe usage, data layout, agent management, development notes, and troubleshooting.
|
||||
|
||||
5
WIKI/.gitattributes
vendored
Normal file
@ -0,0 +1,5 @@
# Treat markdown files as text with LF normalization
*.md text eol=lf

# Ensure JSON files are treated as text
*.json text
34
WIKI/AGENTS.md
Normal file
@ -0,0 +1,34 @@
|
||||
Agents and Keys
|
||||
|
||||
This project supports running multiple agent identities (private keys) to place orders on Hyperliquid. Agents are lightweight keys authorized on-chain by your main wallet.
|
||||
|
||||
Agent storage and environment
|
||||
|
||||
- For security, agent private keys should be stored as environment variables and not checked into source control.
|
||||
- Supported patterns:
|
||||
- `AGENT_PRIVATE_KEY` (single default agent)
|
||||
- `<NAME>_AGENT_PK` or `<NAME>_AGENT_PRIVATE_KEY` (per-agent keys)
|
||||
|
||||
Discovering agents
|
||||
|
||||
- `trade_executor.py` scans environment variables for agent keys and loads them into `Exchange` objects so each agent can sign orders independently (see the sketch below).
|
||||
|
||||
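A hedged sketch of what that naming-convention scan might look like; the exact logic in `trade_executor.py` may differ.

```python
# Discover agent keys via AGENT_PRIVATE_KEY plus the <NAME>_AGENT_PK /
# <NAME>_AGENT_PRIVATE_KEY conventions described above.
import os

_SUFFIXES = ("_AGENT_PK", "_AGENT_PRIVATE_KEY")

def discover_agent_keys() -> dict[str, str]:
    agents = {}
    default_key = os.getenv("AGENT_PRIVATE_KEY")
    if default_key:
        agents["default"] = default_key
    for var, value in os.environ.items():
        for suffix in _SUFFIXES:
            if var.endswith(suffix) and value:
                agents[var[: -len(suffix)].lower()] = value  # e.g. SCALPER_AGENT_PK -> "scalper"
                break
    return agents
```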
Creating and authorizing agents
|
||||
|
||||
- Use `create_agent.py` with your `MAIN_WALLET_PRIVATE_KEY` to authorize a new agent name. The script will attempt to call `exchange.approve_agent(agent_name)` and print the returned agent private key.
|
||||
|
||||
Security notes
|
||||
|
||||
- Never commit private keys to Git. Keep them in a secure secrets store or local `.env` file excluded from version control.
|
||||
- Rotate keys if they are ever exposed and re-authorize agents using your main wallet.
|
||||
|
||||
Example `.env` snippet
|
||||
|
||||
MAIN_WALLET_PRIVATE_KEY=<your-main-wallet-private-key>
|
||||
MAIN_WALLET_ADDRESS=<your-main-wallet-address>
|
||||
AGENT_PRIVATE_KEY=<agent-private-key>
|
||||
EXECUTOR_SCALPER_AGENT_PK=<agent-private-key-for-scalper>
|
||||
|
||||
The `agents` file
|
||||
|
||||
- This repository may contain a local `agents` file used as a quick snapshot; treat it as insecure and remove it from the repo or add it to `.gitignore` if it contains secrets.
|
||||
20
WIKI/CONTRIBUTING.md
Normal file
@ -0,0 +1,20 @@
|
||||
Contributing
|
||||
|
||||
Thanks for considering contributing! Please follow these guidelines to make the process smooth.
|
||||
|
||||
How to contribute
|
||||
|
||||
1. Fork the repository and create a feature branch for your change.
|
||||
2. Keep changes focused and add tests where appropriate.
|
||||
3. Submit a Pull Request with a clear description and the reason for the change.
|
||||
|
||||
Coding standards
|
||||
|
||||
- Keep functions small and well-documented.
|
||||
- Use the existing logging utilities for consistent output.
|
||||
- Prefer safe, incremental changes for financial code.
|
||||
|
||||
Security and secrets
|
||||
|
||||
- Never commit private keys, API keys, or secrets. Use environment variables or a secrets manager.
|
||||
- If you accidentally commit secrets, rotate them immediately.
|
||||
31
WIKI/DATA.md
Normal file
@ -0,0 +1,31 @@
|
||||
Data layout and formats
|
||||
|
||||
This section describes the `_data/` directory and the important files used by the scripts.
|
||||
|
||||
Important files
|
||||
|
||||
- `_data/market_data.db` — SQLite database that stores candle tables. Tables are typically named `<COIN>_<INTERVAL>` (e.g., `BTC_1m`, `ETH_5m`).
|
||||
- `_data/coin_precision.json` — Mapping of coin names to their size precision (created by `list_coins.py`).
|
||||
- `_data/current_prices.json` — Latest market prices that `market.py` writes.
|
||||
- `_data/fetcher_status.json` — Last run metadata from `data_fetcher.py`.
|
||||
- `_data/market_cap_data.json` — Market cap summary saved by `market_cap_fetcher.py`.
|
||||
- `_data/strategies.json` — Configuration for strategies (enabled flag, parameters).
|
||||
- `_data/strategy_status_<name>.json` — Per-strategy runtime status including last signal and price.
|
||||
- `_data/executor_managed_positions.json` — Which strategy is currently managing which live position (used by `trade_executor`).
|
||||
|
||||
Candle schema
|
||||
|
||||
Each candle table contains columns similar to the following (a table-creation sketch follows the list):
|
||||
- `timestamp_ms` (INTEGER) — milliseconds since epoch
|
||||
- `open`, `high`, `low`, `close` (FLOAT)
|
||||
- `volume` (FLOAT)
|
||||
- `number_of_trades` (INTEGER)
|
||||
|
||||
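A table-creation sketch matching the column list above. This is illustrative only; the fetcher's actual DDL may differ, and SQLite stores the FLOAT columns with REAL affinity.

```python
# Create a <COIN>_<INTERVAL> candle table (e.g. BTC_1m) if it is missing.
import sqlite3

def create_candle_table(db_path: str, coin: str, interval: str) -> None:
    table = f"{coin}_{interval}"  # assumes coin/interval come from trusted config
    with sqlite3.connect(db_path, timeout=10) as conn:  # context manager commits on success
        conn.execute(
            f"""CREATE TABLE IF NOT EXISTS "{table}" (
                timestamp_ms     INTEGER PRIMARY KEY,
                open             REAL,
                high             REAL,
                low              REAL,
                close            REAL,
                volume           REAL,
                number_of_trades INTEGER
            )"""
        )
```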
Trade logs
|
||||
|
||||
- Persistent trade history is stored in `_logs/trade_history.csv` with the following columns: `timestamp_utc`, `strategy`, `coin`, `action`, `price`, `size`, `signal`, `pnl`.
|
||||
|
||||
Backups and maintenance
|
||||
|
||||
- Periodically back up `_data/market_data.db`. The WAL and SHM files are also present when SQLite uses WAL mode.
|
||||
- Keep JSON config/state files under version control only if they contain no secrets.
|
||||
24
WIKI/DEVELOPMENT.md
Normal file
@ -0,0 +1,24 @@
|
||||
Development and testing
|
||||
|
||||
Code style and conventions
|
||||
|
||||
- Python 3.11+ with typing hints where helpful.
|
||||
- Use `logging_utils.setup_logging` for consistent logs across scripts.
|
||||
|
||||
Running tests
|
||||
|
||||
- This repository doesn't currently include a formal test suite. Suggested quick checks:
|
||||
- Run `python list_coins.py` to verify connectivity to Hyperliquid Info.
|
||||
- Run `python -m pyflakes .` or `python -m pylint` if you have linters installed.
|
||||
|
||||
Adding a new strategy
|
||||
|
||||
1. Create a new script following the pattern in `strategy_template.py`.
|
||||
2. Add an entry to `_data/strategies.json` with `enabled: true` and relevant parameters.
|
||||
3. Ensure the strategy writes a status JSON file (`_data/strategy_status_<name>.json`) and uses `trade_log.log_trade` to record actions.
|
||||
|
||||
Recommended improvements (low-risk)
|
||||
|
||||
- Add a lightweight unit test suite (pytest) for core functions like timeframe parsing, SQL helpers, and signal calculation (see the sketch below).
|
||||
- Add CI (GitHub Actions) to run flake/pylint and unit tests on PRs.
|
||||
- Move secrets handling to a `.env.example` and document environment variables in `WIKI/SETUP.md`.
|
||||
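A small pytest sketch for the first suggestion above. `parse_timeframe` is an assumed helper name used only for illustration, not necessarily the project's actual function.

```python
# Example unit test for timeframe parsing.
import pytest

def parse_timeframe(tf: str) -> int:
    """Return a timeframe length in minutes, e.g. '5m' -> 5, '1h' -> 60."""
    units = {"m": 1, "h": 60, "d": 1440}
    return int(tf[:-1]) * units[tf[-1]]

@pytest.mark.parametrize("tf,minutes", [("1m", 1), ("5m", 5), ("1h", 60), ("1d", 1440)])
def test_parse_timeframe(tf, minutes):
    assert parse_timeframe(tf) == minutes
```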
29
WIKI/OVERVIEW.md
Normal file
@ -0,0 +1,29 @@
|
||||
Hyperliquid Trading Toolkit
|
||||
|
||||
This repository contains a collection of utility scripts, data fetchers, resamplers, trading strategies, and a trade executor for working with Hyperliquid trading APIs and crawled data. It is organized to support data collection, transformation, strategy development, and automated execution via agents.
|
||||
|
||||
Key components
|
||||
|
||||
- Data fetching and management: `data_fetcher.py`, `market.py`, `resampler.py`, `market_cap_fetcher.py`, `list_coins.py`
|
||||
- Strategies: `strategy_sma_cross.py`, `strategy_template.py`, `strategy_sma_125d.py` (if present)
|
||||
- Execution: `trade_executor.py`, `create_agent.py`, `agents` helper
|
||||
- Utilities: `logging_utils.py`, `trade_log.py`
|
||||
- Data storage: SQLite database in `_data/market_data.db` and JSON files in `_data`
|
||||
|
||||
Intended audience
|
||||
|
||||
- Developers building strategies and automations on Hyperliquid
|
||||
- Data engineers collecting and processing market data
|
||||
- Operators running the fetchers and executors on a scheduler or as system services
|
||||
|
||||
Project goals
|
||||
|
||||
- Reliable collection of 1m candles and resampling to common timeframes
|
||||
- Clean separation between data, strategies, and execution
|
||||
- Lightweight logging and traceable trade records
|
||||
|
||||
Where to start
|
||||
|
||||
- Read `WIKI/SETUP.md` to prepare your environment
|
||||
- Use `WIKI/SCRIPTS.md` for a description of individual scripts and how to run them
|
||||
- Inspect `WIKI/AGENTS.md` to understand agent keys and how to manage them
|
||||
47
WIKI/SCRIPTS.md
Normal file
@ -0,0 +1,47 @@
|
||||
Scripts and How to Use Them
|
||||
|
||||
This file documents the main scripts in the repository and their purpose, typical runtime parameters, and key notes.
|
||||
|
||||
list_coins.py
|
||||
- Purpose: Fetches asset metadata from Hyperliquid (name and size/precision) and saves `_data/coin_precision.json`.
|
||||
- Usage: `python list_coins.py`
|
||||
- Notes: Reads `hyperliquid.info.Info` and writes a JSON file. Useful to run before market feeders.
|
||||
|
||||
market.py (MarketDataFeeder)
|
||||
- Purpose: Fetches live prices from Hyperliquid and writes `_data/current_prices.json` while printing a live table.
|
||||
- Usage: `python market.py --log-level normal`
|
||||
- Notes: Expects `_data/coin_precision.json` to exist.
|
||||
|
||||
data_fetcher.py (CandleFetcherDB)
|
||||
- Purpose: Fetches historical 1m candles and stores them in `_data/market_data.db` using a table-per-coin naming convention.
|
||||
- Usage: `python data_fetcher.py --coins BTC ETH --interval 1m --days 7`
|
||||
- Notes: Can be run regularly by a scheduler to keep the DB up to date.
|
||||
|
||||
resampler.py (Resampler)
|
||||
- Purpose: Reads 1m candles from SQLite and resamples to configured timeframes (e.g. 5m, 15m, 1h), appending new candles to tables.
|
||||
- Usage: `python resampler.py --coins BTC ETH --timeframes 5m 15m 1h --log-level normal`
|
||||
|
||||
market_cap_fetcher.py (MarketCapFetcher)
|
||||
- Purpose: Pulls CoinGecko market cap numbers and maintains historical daily tables in the same SQLite DB.
|
||||
- Usage: `python market_cap_fetcher.py --coins BTC ETH --log-level normal`
|
||||
- Notes: Optional `COINGECKO_API_KEY` in `.env` avoids throttling.
|
||||
|
||||
strategy_sma_cross.py (SmaCrossStrategy)
|
||||
- Purpose: Run an SMA-based trading strategy. Reads candles from `_data/market_data.db` and writes status to `_data/strategy_status_<name>.json`.
|
||||
- Usage: `python strategy_sma_cross.py --name sma_cross_1 --params '{"coin":"BTC","timeframe":"1m","fast":5,"slow":20}' --log-level normal`
|
||||
|
||||
trade_executor.py (TradeExecutor)
|
||||
- Purpose: Orchestrates agent-based order execution using agent private keys found in environment variables. Uses `_data/strategies.json` to determine active strategies.
|
||||
- Usage: `python trade_executor.py --log-level normal`
|
||||
- Notes: Requires `MAIN_WALLET_ADDRESS` and agent keys. See `create_agent.py` to authorize agents on-chain.
|
||||
|
||||
create_agent.py
|
||||
- Purpose: Authorizes a new on-chain agent using your main wallet (requires `MAIN_WALLET_PRIVATE_KEY`).
|
||||
- Usage: `python create_agent.py`
|
||||
- Notes: Prints the new agent private key to stdout — save it securely.
|
||||
|
||||
trade_log.py
|
||||
- Purpose: Provides a thread-safe CSV trade history logger. Used by the executor and strategies to record actions (see the sketch below).
|
||||
|
||||
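An illustrative sketch of such a logger; the real `trade_log.log_trade` signature may differ. Columns follow `WIKI/DATA.md`.

```python
# Thread-safe append-only CSV trade logger (sketch).
import csv
import threading
from datetime import datetime, timezone
from pathlib import Path

_LOCK = threading.Lock()
_LOG_PATH = Path("_logs/trade_history.csv")
_COLUMNS = ["timestamp_utc", "strategy", "coin", "action", "price", "size", "signal", "pnl"]

def log_trade(strategy, coin, action, price, size, signal, pnl=None):
    """Append one trade row, writing the header when the file is first created."""
    with _LOCK:
        _LOG_PATH.parent.mkdir(parents=True, exist_ok=True)
        is_new = not _LOG_PATH.exists()
        with _LOG_PATH.open("a", newline="") as f:
            writer = csv.writer(f)
            if is_new:
                writer.writerow(_COLUMNS)
            writer.writerow([datetime.now(timezone.utc).isoformat(),
                             strategy, coin, action, price, size, signal, pnl])
```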
Other utility scripts
|
||||
- import_csv.py, fix_timestamps.py, list_coins.py, etc. — see file headers for details.
|
||||
42
WIKI/SETUP.md
Normal file
@ -0,0 +1,42 @@
|
||||
Setup and Installation
|
||||
|
||||
Prerequisites
|
||||
|
||||
- Python 3.11+ (project uses modern dependencies)
|
||||
- Git (optional)
|
||||
- A Hyperliquid account and an activated main wallet if you want to authorize agents and trade
|
||||
|
||||
Virtual environment
|
||||
|
||||
1. Create a virtual environment:
|
||||
|
||||
python -m venv .venv
|
||||
|
||||
2. Activate the virtual environment (PowerShell on Windows):
|
||||
|
||||
.\.venv\Scripts\Activate.ps1
|
||||
|
||||
3. Upgrade pip and install dependencies:
|
||||
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
Configuration
|
||||
|
||||
- Copy `.env.example` to `.env` and set the following variables as required:
|
||||
- MAIN_WALLET_PRIVATE_KEY (used by `create_agent.py` to authorize agents)
|
||||
- MAIN_WALLET_ADDRESS (used by `trade_executor.py`)
|
||||
- AGENT_PRIVATE_KEY or per-agent keys like `EXECUTOR_SCALPER_AGENT_PK`
|
||||
- Optional: COINGECKO_API_KEY for `market_cap_fetcher.py` to avoid rate limits
|
||||
|
||||
Data directory
|
||||
|
||||
- The project writes and reads data from the `_data/` folder. Ensure the directory exists and is writable by the user running the scripts.
|
||||
|
||||
Quick test
|
||||
|
||||
After installing packages, run `list_coins.py` in a dry run to verify connectivity to the Hyperliquid info API:
|
||||
|
||||
python list_coins.py
|
||||
|
||||
If you encounter import errors, ensure the virtual environment is active and the `requirements.txt` dependencies are installed.
|
||||
15
WIKI/SUMMARY.md
Normal file
@ -0,0 +1,15 @@
|
||||
Project Wiki Summary
|
||||
|
||||
This directory contains human-friendly documentation for the project. Files:
|
||||
|
||||
- `OVERVIEW.md` — High-level overview and where to start
|
||||
- `SETUP.md` — Environment setup and quick test steps
|
||||
- `SCRIPTS.md` — Per-script documentation and usage examples
|
||||
- `AGENTS.md` — How agents work and secure handling of keys
|
||||
- `DATA.md` — Data folder layout and schema notes
|
||||
- `DEVELOPMENT.md` — Developer guidance and recommended improvements
|
||||
- `CONTRIBUTING.md` — How to contribute safely
|
||||
- `TROUBLESHOOTING.md` — Common problems and solutions
|
||||
|
||||
Notes:
|
||||
- These pages were generated from repository source files and common patterns in trading/data projects. Validate any sensitive information (agent keys) and remove them from the repository when sharing.
|
||||
21
WIKI/TROUBLESHOOTING.md
Normal file
@ -0,0 +1,21 @@
|
||||
Troubleshooting common issues
|
||||
|
||||
1. Import errors
|
||||
- Ensure the virtual environment is active.
|
||||
- Run `pip install -r requirements.txt`.
|
||||
|
||||
2. Agent authorization failures
|
||||
- Ensure your main wallet is activated on Hyperliquid and has funds.
|
||||
- The `create_agent.py` script will print helpful messages if the vault (main wallet) cannot act.
|
||||
|
||||
3. SQLite locked errors
|
||||
- Increase the SQLite timeout when opening connections (this project uses a 10-second timeout in the fetcher; see the sketch at the end of this page). Close other programs that may hold the DB open.
|
||||
|
||||
4. Missing coin precision file
|
||||
- Run `python list_coins.py` to regenerate `_data/coin_precision.json`.
|
||||
|
||||
5. Rate limits from CoinGecko
|
||||
- Set `COINGECKO_API_KEY` in your `.env` file and ensure the fetcher respects backoff.
|
||||
|
||||
6. Agent keys in `agents` file or other local files
|
||||
- Treat any `agents` file with private keys as compromised; rotate keys and remove the file from the repository.
|
||||
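Sketch for item 3: opening SQLite with a longer timeout so a briefly locked database does not fail immediately. The 10-second value mirrors what the fetcher reportedly uses.

```python
# Open the market database, waiting up to 10 seconds for locks to clear.
import sqlite3

def open_db(path: str = "_data/market_data.db") -> sqlite3.Connection:
    conn = sqlite3.connect(path, timeout=10)
    conn.execute("PRAGMA journal_mode=WAL")  # WAL mode reduces reader/writer blocking
    return conn
```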
Binary file not shown.
BIN
__pycache__/trade_log.cpython-313.pyc
Normal file
Binary file not shown.
18
_data/backtesting_conf.json
Normal file
@ -0,0 +1,18 @@
|
||||
{
|
||||
"sma_cross_eth_5m": {
|
||||
"strategy_name": "sma_cross_1",
|
||||
"script": "strategies.ma_cross_strategy.MaCrossStrategy",
|
||||
"optimization_params": {
|
||||
"fast": {
|
||||
"start": 5,
|
||||
"end": 150,
|
||||
"step": 1
|
||||
},
|
||||
"slow": {
|
||||
"start": 0,
|
||||
"end": 0,
|
||||
"step": 1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
231
_data/candles/hyperliquid-historical.py
Normal file
@ -0,0 +1,231 @@
|
||||
import boto3
|
||||
from botocore import UNSIGNED
|
||||
from botocore.config import Config
|
||||
from botocore.exceptions import ClientError
|
||||
import os
|
||||
import argparse
|
||||
from datetime import datetime, timedelta
|
||||
import asyncio
|
||||
import lz4.frame
|
||||
from pathlib import Path
|
||||
import csv
|
||||
import json
|
||||
|
||||
|
||||
|
||||
# Paths are handled with pathlib below.
|
||||
DIR_PATH = Path(__file__).parent
|
||||
BUCKET = "hyperliquid-archive"
|
||||
CSV_HEADER = ["datetime", "timestamp", "level", "price", "size", "number"]
|
||||
|
||||
# s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
|
||||
# s3.download_file('hyperliquid-archive', 'market_data/20230916/9/l2Book/SOL.lz4', f"{dir_path}/SOL.lz4")
|
||||
|
||||
# earliest date: 20230415/0/
|
||||
|
||||
|
||||
|
||||
def get_args():
|
||||
parser = argparse.ArgumentParser(description="Retrieve historical tick level market data from Hyperliquid exchange")
|
||||
subparser = parser.add_subparsers(dest="tool", required=True, help="tool: download, decompress, to_csv")
|
||||
|
||||
global_parser = subparser.add_parser("global_settings", add_help=False)
|
||||
global_parser.add_argument("t", metavar="Tickers", help="Tickers of assets to be downloaded seperated by spaces. e.g. BTC ETH", nargs="+")
|
||||
global_parser.add_argument("--all", help="Apply action to all available dates and times.", action="store_true", default=False)
|
||||
global_parser.add_argument("--anonymous", help="Use anonymous (unsigned) S3 requests. Defaults to signed requests if not provided.", action="store_true", default=False)
|
||||
global_parser.add_argument("-sd", metavar="Start date", help="Starting date as one unbroken string formatted: YYYYMMDD. e.g. 20230916")
|
||||
global_parser.add_argument("-sh", metavar="Start hour", help="Hour of the starting day as an integer between 0 and 23. e.g. 9 Default: 0", type=int, default=0)
|
||||
global_parser.add_argument("-ed", metavar="End date", help="Ending date as one unbroken string formatted: YYYYMMDD. e.g. 20230916")
|
||||
global_parser.add_argument("-eh", metavar="End hour", help="Hour of the ending day as an integer between 0 and 23. e.g. 9 Default: 23", type=int, default=23)
|
||||
|
||||
|
||||
download_parser = subparser.add_parser("download", help="Download historical market data", parents=[global_parser])
|
||||
decompress_parser = subparser.add_parser("decompress", help="Decompress downloaded lz4 data", parents=[global_parser])
|
||||
to_csv_parser = subparser.add_parser("to_csv", help="Convert decompressed downloads into formatted CSV", parents=[global_parser])
|
||||
|
||||
|
||||
return parser.parse_args()
|
||||
|
||||
|
||||
|
||||
|
||||
def make_date_list(start_date, end_date):
|
||||
start_date = datetime.strptime(start_date, '%Y%m%d')
|
||||
end_date = datetime.strptime(end_date, '%Y%m%d')
|
||||
|
||||
date_list = []
|
||||
|
||||
current_date = start_date
|
||||
while current_date <= end_date:
|
||||
date_list.append(current_date.strftime('%Y%m%d'))
|
||||
current_date += timedelta(days=1)
|
||||
|
||||
return date_list
|
||||
|
||||
|
||||
|
||||
|
||||
def make_date_hour_list(date_list, start_hour, end_hour, delimiter="/"):
|
||||
date_hour_list = []
|
||||
end_date = date_list[-1]
|
||||
hour = start_hour
|
||||
end = 23
|
||||
for date in date_list:
|
||||
if date == end_date:
|
||||
end = end_hour
|
||||
|
||||
while hour <= end:
|
||||
date_hour = date + delimiter + str(hour)
|
||||
date_hour_list.append(date_hour)
|
||||
hour += 1
|
||||
|
||||
hour = 0
|
||||
|
||||
return date_hour_list
|
||||
|
||||
|
||||
|
||||
|
||||
async def download_object(s3, asset, date_hour):
|
||||
date_and_hour = date_hour.split("/")
|
||||
key = f"market_data/{date_hour}/l2Book/{asset}.lz4"
|
||||
dest = f"{DIR_PATH}/downloads/{asset}/{date_and_hour[0]}-{date_and_hour[1]}.lz4"
|
||||
try:
|
||||
s3.download_file(BUCKET, key, dest)
|
||||
except ClientError as e:
|
||||
# Print a concise message and continue. Common errors: 403 Forbidden, 404 Not Found.
|
||||
code = e.response.get('Error', {}).get('Code') if hasattr(e, 'response') else 'Unknown'
|
||||
print(f"Failed to download {key}: {code} - {e}")
|
||||
return
|
||||
|
||||
|
||||
|
||||
|
||||
async def download_objects(s3, assets, date_hour_list):
|
||||
print(f"Downloading {len(date_hour_list)} objects...")
|
||||
for asset in assets:
|
||||
await asyncio.gather(*[download_object(s3, asset, date_hour) for date_hour in date_hour_list])
|
||||
|
||||
|
||||
|
||||
|
||||
async def decompress_file(asset, date_hour):
|
||||
lz_file_path = DIR_PATH / "downloads" / asset / f"{date_hour}.lz4"
|
||||
file_path = DIR_PATH / "downloads" / asset / date_hour
|
||||
|
||||
if not lz_file_path.is_file():
|
||||
print(f"decompress_file: file not found: {lz_file_path}")
|
||||
return
|
||||
|
||||
with lz4.frame.open(lz_file_path, mode='r') as lzfile:
|
||||
data = lzfile.read()
|
||||
with open(file_path, "wb") as file:
|
||||
file.write(data)
|
||||
|
||||
|
||||
|
||||
|
||||
async def decompress_files(assets, date_hour_list):
|
||||
print(f"Decompressing {len(date_hour_list)} files...")
|
||||
for asset in assets:
|
||||
await asyncio.gather(*[decompress_file(asset, date_hour) for date_hour in date_hour_list])
|
||||
|
||||
|
||||
|
||||
|
||||
def write_rows(csv_writer, line):
|
||||
rows = []
|
||||
entry = json.loads(line)
|
||||
date_time = entry["time"]
|
||||
timestamp = str(entry["raw"]["data"]["time"])
|
||||
all_orders = entry["raw"]["data"]["levels"]
|
||||
|
||||
for i, order_level in enumerate(all_orders):
|
||||
level = str(i + 1)
|
||||
for order in order_level:
|
||||
price = order["px"]
|
||||
size = order["sz"]
|
||||
number = str(order["n"])
|
||||
|
||||
rows.append([date_time, timestamp, level, price, size, number])
|
||||
|
||||
for row in rows:
|
||||
csv_writer.writerow(row)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
async def convert_file(asset, date_hour):
|
||||
file_path = DIR_PATH / "downloads" / asset / date_hour
|
||||
csv_path = DIR_PATH / "csv" / asset / f"{date_hour}.csv"
|
||||
|
||||
with open(csv_path, "w", newline='') as csv_file:
|
||||
csv_writer = csv.writer(csv_file, dialect="excel")
|
||||
csv_writer.writerow(CSV_HEADER)
|
||||
|
||||
with open(file_path) as file:
|
||||
for line in file:
|
||||
write_rows(csv_writer, line)
|
||||
|
||||
|
||||
|
||||
|
||||
async def files_to_csv(assets, date_hour_list):
|
||||
print(f"Converting {len(date_hour_list)} files to CSV...")
|
||||
for asset in assets:
|
||||
await asyncio.gather(*[convert_file(asset, date_hour) for date_hour in date_hour_list])
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def main():
|
||||
print(DIR_PATH)
|
||||
args = get_args()
|
||||
|
||||
# Create S3 client according to whether anonymous access was requested.
|
||||
if getattr(args, 'anonymous', False):
|
||||
s3 = boto3.client('s3', config=Config(signature_version=UNSIGNED))
|
||||
else:
|
||||
s3 = boto3.client('s3')
|
||||
|
||||
downloads_path = DIR_PATH / "downloads"
|
||||
downloads_path.mkdir(exist_ok=True)
|
||||
|
||||
csv_path = DIR_PATH / "csv"
|
||||
csv_path.mkdir(exist_ok=True)
|
||||
|
||||
for asset in args.t:
|
||||
downloads_asset_path = downloads_path / asset
|
||||
downloads_asset_path.mkdir(exist_ok=True)
|
||||
csv_asset_path = csv_path / asset
|
||||
csv_asset_path.mkdir(exist_ok=True)
|
||||
|
||||
date_list = make_date_list(args.sd, args.ed)
|
||||
loop = asyncio.new_event_loop()
|
||||
|
||||
if args.tool == "download":
|
||||
date_hour_list = make_date_hour_list(date_list, args.sh, args.eh)
|
||||
loop.run_until_complete(download_objects(s3, args.t, date_hour_list))
|
||||
loop.close()
|
||||
|
||||
if args.tool == "decompress":
|
||||
date_hour_list = make_date_hour_list(date_list, args.sh, args.eh, delimiter="-")
|
||||
loop.run_until_complete(decompress_files(args.t, date_hour_list))
|
||||
loop.close()
|
||||
|
||||
if args.tool == "to_csv":
|
||||
date_hour_list = make_date_hour_list(date_list, args.sh, args.eh, delimiter="-")
|
||||
loop.run_until_complete(files_to_csv(args.t, date_hour_list))
|
||||
loop.close()
|
||||
|
||||
|
||||
print("Done")
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
8
_data/candles/requirements.txt
Normal file
@ -0,0 +1,8 @@
|
||||
boto3==1.34.131
|
||||
botocore==1.34.131
|
||||
jmespath==1.0.1
|
||||
lz4==4.3.3
|
||||
python-dateutil==2.9.0.post0
|
||||
s3transfer==0.10.1
|
||||
six==1.16.0
|
||||
urllib3==2.2.2
|
||||
208
_data/coin_id_map.json
Normal file
@ -0,0 +1,208 @@
|
||||
{
|
||||
"0G": "zero-gravity",
|
||||
"2Z": "doublezero",
|
||||
"AAVE": "aave",
|
||||
"ACE": "endurance",
|
||||
"ADA": "ada-the-dog",
|
||||
"AI": "sleepless-ai",
|
||||
"AI16Z": "ai16z",
|
||||
"AIXBT": "aixbt",
|
||||
"ALGO": "dear-algorithm",
|
||||
"ALT": "altlayer",
|
||||
"ANIME": "anime-token",
|
||||
"APE": "ape-3",
|
||||
"APEX": "apex-token-2",
|
||||
"APT": "aptos",
|
||||
"AR": "arweave",
|
||||
"ARB": "osmosis-allarb",
|
||||
"ARK": "ark-3",
|
||||
"ASTER": "astar",
|
||||
"ATOM": "lost-bitcoin-layer",
|
||||
"AVAX": "binance-peg-avalanche",
|
||||
"AVNT": "avantis",
|
||||
"BABY": "baby-2",
|
||||
"BADGER": "badger-dao",
|
||||
"BANANA": "nforbanana",
|
||||
"BCH": "bitcoin-cash",
|
||||
"BERA": "berachain-bera",
|
||||
"BIGTIME": "big-time",
|
||||
"BIO": "bio-protocol",
|
||||
"BLAST": "blast",
|
||||
"BLUR": "blur",
|
||||
"BLZ": "bluzelle",
|
||||
"BNB": "binancecoin",
|
||||
"BNT": "bancor",
|
||||
"BOME": "book-of-meme",
|
||||
"BRETT": "brett",
|
||||
"BSV": "bitcoin-cash-sv",
|
||||
"BTC": "bitcoin",
|
||||
"CAKE": "pancakeswap-token",
|
||||
"CANTO": "canto",
|
||||
"CATI": "catizen",
|
||||
"CELO": "celo",
|
||||
"CFX": "cosmic-force-token-v2",
|
||||
"CHILLGUY": "just-a-chill-guy",
|
||||
"COMP": "compound-governance-token",
|
||||
"CRV": "curve-dao-token",
|
||||
"CYBER": "cyberconnect",
|
||||
"DOGE": "doge-on-pulsechain",
|
||||
"DOOD": "doodles",
|
||||
"DOT": "xcdot",
|
||||
"DYDX": "dydx-chain",
|
||||
"DYM": "dymension",
|
||||
"EIGEN": "eigenlayer",
|
||||
"ENA": "ethena",
|
||||
"ENS": "ethereum-name-service",
|
||||
"ETC": "ethereum-classic",
|
||||
"ETH": "ethereum",
|
||||
"ETHFI": "ether-fi",
|
||||
"FARTCOIN": "fartcoin-2",
|
||||
"FET": "fetch-ai",
|
||||
"FIL": "filecoin",
|
||||
"FRIEND": "friend-tech",
|
||||
"FTM": "fantom",
|
||||
"FTT": "ftx-token",
|
||||
"GALA": "gala",
|
||||
"GAS": "gas",
|
||||
"GMT": "stepn",
|
||||
"GMX": "gmx",
|
||||
"GOAT": "goat",
|
||||
"GRASS": "grass-3",
|
||||
"GRIFFAIN": "griffain",
|
||||
"HBAR": "hedera-hashgraph",
|
||||
"HEMI": "hemi",
|
||||
"HMSTR": "hamster-kombat",
|
||||
"HYPE": "hyperliquid",
|
||||
"HYPER": "hyper-4",
|
||||
"ILV": "illuvium",
|
||||
"IMX": "immutable-x",
|
||||
"INIT": "initia",
|
||||
"INJ": "injective-protocol",
|
||||
"IO": "io",
|
||||
"IOTA": "iota-2",
|
||||
"IP": "story-2",
|
||||
"JELLY": "jelly-time",
|
||||
"JTO": "jito-governance-token",
|
||||
"JUP": "jupiter-exchange-solana",
|
||||
"KAITO": "kaito",
|
||||
"KAS": "wrapped-kaspa",
|
||||
"LAUNCHCOIN": "ben-pasternak",
|
||||
"LAYER": "unilayer",
|
||||
"LDO": "linea-bridged-ldo-linea",
|
||||
"LINEA": "linea",
|
||||
"LINK": "osmosis-alllink",
|
||||
"LISTA": "lista",
|
||||
"LOOM": "loom",
|
||||
"LTC": "litecoin",
|
||||
"MANTA": "manta-network",
|
||||
"MATIC": "matic-network",
|
||||
"MAV": "maverick-protocol",
|
||||
"MAVIA": "heroes-of-mavia",
|
||||
"ME": "magic-eden",
|
||||
"MEGA": "megaeth",
|
||||
"MELANIA": "melania-meme",
|
||||
"MEME": "mpx6900",
|
||||
"MERL": "merlin-chain",
|
||||
"MET": "metya",
|
||||
"MEW": "cat-in-a-dogs-world",
|
||||
"MINA": "mina-protocol",
|
||||
"MKR": "maker",
|
||||
"MNT": "mynth",
|
||||
"MON": "mon-protocol",
|
||||
"MOODENG": "moo-deng-2",
|
||||
"MORPHO": "morpho",
|
||||
"MOVE": "movement",
|
||||
"MYRO": "myro",
|
||||
"NEAR": "near",
|
||||
"NEO": "neo",
|
||||
"NIL": "nillion",
|
||||
"NOT": "nothing-3",
|
||||
"NTRN": "neutron-3",
|
||||
"NXPC": "nexpace",
|
||||
"OGN": "origin-protocol",
|
||||
"OM": "mantra-dao",
|
||||
"OMNI": "omni-2",
|
||||
"ONDO": "ondo-finance",
|
||||
"OP": "optimism",
|
||||
"ORBS": "orbs",
|
||||
"ORDI": "ordinals",
|
||||
"OX": "ox-fun",
|
||||
"PANDORA": "pandora",
|
||||
"PAXG": "pax-gold",
|
||||
"PENDLE": "pendle",
|
||||
"PENGU": "pudgy-penguins",
|
||||
"PEOPLE": "constitutiondao-wormhole",
|
||||
"PIXEL": "pixel-3",
|
||||
"PNUT": "pnut",
|
||||
"POL": "proof-of-liquidity",
|
||||
"POLYX": "polymesh",
|
||||
"POPCAT": "popcat",
|
||||
"PROMPT": "wayfinder",
|
||||
"PROVE": "succinct",
|
||||
"PUMP": "pump-fun",
|
||||
"PURR": "purr-2",
|
||||
"PYTH": "pyth-network",
|
||||
"RDNT": "radiant-capital",
|
||||
"RENDER": "render-token",
|
||||
"REQ": "request-network",
|
||||
"RESOLV": "resolv",
|
||||
"REZ": "renzo",
|
||||
"RLB": "rollbit-coin",
|
||||
"RSR": "reserve-rights-token",
|
||||
"RUNE": "thorchain",
|
||||
"S": "token-s",
|
||||
"SAGA": "saga-2",
|
||||
"SAND": "the-sandbox-wormhole",
|
||||
"SCR": "scroll",
|
||||
"SEI": "sei-network",
|
||||
"SHIA": "shiba-saga",
|
||||
"SKY": "sky",
|
||||
"SNX": "havven",
|
||||
"SOL": "solana",
|
||||
"SOPH": "sophon",
|
||||
"SPX": "spx6900",
|
||||
"STBL": "stbl",
|
||||
"STG": "stargate-finance",
|
||||
"STRAX": "stratis",
|
||||
"STRK": "starknet",
|
||||
"STX": "stox",
|
||||
"SUI": "sui",
|
||||
"SUPER": "superfarm",
|
||||
"SUSHI": "sushi",
|
||||
"SYRUP": "syrup",
|
||||
"TAO": "the-anthropic-order",
|
||||
"TIA": "tia",
|
||||
"TNSR": "tensorium",
|
||||
"TON": "tontoken",
|
||||
"TRB": "tellor",
|
||||
"TRUMP": "trumpeffect69420",
|
||||
"TRX": "tron-bsc",
|
||||
"TST": "test-3",
|
||||
"TURBO": "turbo",
|
||||
"UMA": "uma",
|
||||
"UNI": "uni",
|
||||
"UNIBOT": "unibot",
|
||||
"USTC": "wrapped-ust",
|
||||
"USUAL": "usual",
|
||||
"VINE": "vine",
|
||||
"VIRTUAL": "virtual-protocol",
|
||||
"VVV": "venice-token",
|
||||
"W": "w",
|
||||
"WCT": "connect-token-wct",
|
||||
"WIF": "wif-secondchance",
|
||||
"WLD": "worldcoin-wld",
|
||||
"WLFI": "world-liberty-financial",
|
||||
"XAI": "xai-blockchain",
|
||||
"XLM": "stellar",
|
||||
"XPL": "pulse-2",
|
||||
"XRP": "ripple",
|
||||
"YGG": "yield-guild-games",
|
||||
"YZY": "yzy",
|
||||
"ZEC": "zcash",
|
||||
"ZEN": "zenith-3",
|
||||
"ZEREBRO": "zerebro",
|
||||
"ZETA": "zeta",
|
||||
"ZK": "zksync",
|
||||
"ZORA": "zora",
|
||||
"ZRO": "layerzero"
|
||||
}
|
||||
@ -101,6 +101,7 @@
    "MAV": 0,
    "MAVIA": 1,
    "ME": 1,
    "MEGA": 0,
    "MELANIA": 1,
    "MEME": 0,
    "MERL": 0,
1043
_data/market_cap_data.json
Normal file
File diff suppressed because it is too large
BIN
_data/market_data.db-shm
Normal file
Binary file not shown.
11
_data/opened_positions.json
Normal file
@ -0,0 +1,11 @@
{
    "copy_trader_eth_ETH": {
        "strategy": "copy_trader_eth",
        "coin": "ETH",
        "side": "long",
        "open_time_utc": "2025-11-02T20:35:02.988272+00:00",
        "open_price": 3854.9,
        "amount": 0.0055,
        "leverage": 3
    }
}
51
_data/strategies.json
Normal file
@ -0,0 +1,51 @@
{
    "sma_cross_1": {
        "enabled": false,
        "class": "strategies.ma_cross_strategy.MaCrossStrategy",
        "agent": "scalper_agent",
        "parameters": {
            "coin": "ETH",
            "timeframe": "15m",
            "short_ma": 7,
            "long_ma": 44,
            "size": 0.0055,
            "leverage_long": 5,
            "leverage_short": 5
        }
    },
    "sma_44d_btc": {
        "enabled": false,
        "class": "strategies.single_sma_strategy.SingleSmaStrategy",
        "parameters": {
            "agent": "swing",
            "coin": "BTC",
            "timeframe": "1d",
            "sma_period": 44,
            "size": 0.0001,
            "leverage_long": 3,
            "leverage_short": 1
        }
    },
    "copy_trader_eth": {
        "enabled": true,
        "is_event_driven": true,
        "class": "strategies.copy_trader_strategy.CopyTraderStrategy",
        "parameters": {
            "agent": "scalper",
            "target_address": "0x32885a6adac4375858E6edC092EfDDb0Ef46484C",
            "coins_to_copy": {
                "ETH": {
                    "size": 0.0055,
                    "leverage_long": 3,
                    "leverage_short": 3
                },
                "BTC": {
                    "size": 0.0002,
                    "leverage_long": 1,
                    "leverage_short": 1
                }
            }
        }
    }
}
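Each entry above pairs an "enabled" flag and an "agent" name with a dotted "class" path and a "parameters" block. The sketch below shows how such an entry could be resolved into a live strategy object; it is illustrative only (the loader is not the repository's actual orchestrator, and the constructor keywords are an assumption), while the strategies.json layout and the dotted-path convention are taken from the diff itself.

import importlib
import json

def load_enabled_strategies(path="_data/strategies.json"):
    """Instantiate every enabled strategy listed in strategies.json (illustrative sketch)."""
    with open(path, "r", encoding="utf-8") as f:
        config = json.load(f)

    strategies = {}
    for name, entry in config.items():
        if not entry.get("enabled", False):
            continue
        # "class" holds a dotted path like "strategies.ma_cross_strategy.MaCrossStrategy"
        module_path, class_name = entry["class"].rsplit(".", 1)
        strategy_cls = getattr(importlib.import_module(module_path), class_name)
        # Hypothetical constructor call; the real classes may also take queues or shared state
        strategies[name] = strategy_cls(strategy_name=name, params=entry["parameters"])
    return strategies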
7
_data/strategy_state_copy_trader_eth.json
Normal file
@ -0,0 +1,7 @@
{
    "ETH": {
        "side": "long",
        "size": 0.018,
        "entry": 3864.2
    }
}
7
_data/strategy_status_copy_trader_eth.json
Normal file
@ -0,0 +1,7 @@
{
    "strategy_name": "copy_trader_eth",
    "current_signal": "WAIT",
    "last_signal_change_utc": null,
    "signal_price": null,
    "last_checked_utc": "2025-11-02T09:55:08.460168+00:00"
}
290
_data/wallets_info.json
Normal file
@ -0,0 +1,290 @@
{
|
||||
"Whale 1 (BTC Maxi)": {
|
||||
"address": "0xb83de012dba672c76a7dbbbf3e459cb59d7d6e36",
|
||||
"core_state": {
|
||||
"raw_state": {
|
||||
"marginSummary": {
|
||||
"accountValue": "30018881.1193690002",
|
||||
"totalNtlPos": "182930683.6996490061",
|
||||
"totalRawUsd": "212949564.8190180063",
|
||||
"totalMarginUsed": "22969943.9848450013"
|
||||
},
|
||||
"crossMarginSummary": {
|
||||
"accountValue": "30018881.1193690002",
|
||||
"totalNtlPos": "182930683.6996490061",
|
||||
"totalRawUsd": "212949564.8190180063",
|
||||
"totalMarginUsed": "22969943.9848450013"
|
||||
},
|
||||
"crossMaintenanceMarginUsed": "5420634.4984849999",
|
||||
"withdrawable": "7043396.1885489998",
|
||||
"assetPositions": [
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "BTC",
|
||||
"szi": "-546.94441",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 10
|
||||
},
|
||||
"entryPx": "115183.2",
|
||||
"positionValue": "62795781.6009199992",
|
||||
"unrealizedPnl": "203045.067519",
|
||||
"returnOnEquity": "0.0322299761",
|
||||
"liquidationPx": "159230.7089577085",
|
||||
"marginUsed": "6279578.1600919999",
|
||||
"maxLeverage": 40,
|
||||
"cumFunding": {
|
||||
"allTime": "-6923407.0911370004",
|
||||
"sinceOpen": "-6923407.0970780002",
|
||||
"sinceChange": "-1574.188052"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "ETH",
|
||||
"szi": "-13938.989",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 10
|
||||
},
|
||||
"entryPx": "4106.64",
|
||||
"positionValue": "58064252.5784000009",
|
||||
"unrealizedPnl": "-821803.895073",
|
||||
"returnOnEquity": "-0.1435654683",
|
||||
"liquidationPx": "5895.7059682083",
|
||||
"marginUsed": "5806425.2578400001",
|
||||
"maxLeverage": 25,
|
||||
"cumFunding": {
|
||||
"allTime": "-6610045.8844170002",
|
||||
"sinceOpen": "-6610045.8844170002",
|
||||
"sinceChange": "-730.403023"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "SOL",
|
||||
"szi": "-75080.68",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 10
|
||||
},
|
||||
"entryPx": "201.3063",
|
||||
"positionValue": "14975592.4328000005",
|
||||
"unrealizedPnl": "138627.573942",
|
||||
"returnOnEquity": "0.0917199656",
|
||||
"liquidationPx": "519.0933515657",
|
||||
"marginUsed": "1497559.2432800001",
|
||||
"maxLeverage": 20,
|
||||
"cumFunding": {
|
||||
"allTime": "-792893.154387",
|
||||
"sinceOpen": "-922.301401",
|
||||
"sinceChange": "-187.682929"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "DOGE",
|
||||
"szi": "-109217.0",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 10
|
||||
},
|
||||
"entryPx": "0.279959",
|
||||
"positionValue": "22081.49306",
|
||||
"unrealizedPnl": "8494.879599",
|
||||
"returnOnEquity": "2.7782496288",
|
||||
"liquidationPx": "213.2654356057",
|
||||
"marginUsed": "2208.149306",
|
||||
"maxLeverage": 10,
|
||||
"cumFunding": {
|
||||
"allTime": "-1875.469799",
|
||||
"sinceOpen": "-1875.469799",
|
||||
"sinceChange": "45.79339"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "INJ",
|
||||
"szi": "-18747.2",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 3
|
||||
},
|
||||
"entryPx": "13.01496",
|
||||
"positionValue": "162200.7744",
|
||||
"unrealizedPnl": "81793.4435",
|
||||
"returnOnEquity": "1.005680924",
|
||||
"liquidationPx": "1208.3529290194",
|
||||
"marginUsed": "54066.9248",
|
||||
"maxLeverage": 10,
|
||||
"cumFunding": {
|
||||
"allTime": "-539.133533",
|
||||
"sinceOpen": "-539.133533",
|
||||
"sinceChange": "-7.367325"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "SUI",
|
||||
"szi": "-376577.6",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 3
|
||||
},
|
||||
"entryPx": "3.85881",
|
||||
"positionValue": "989495.3017599999",
|
||||
"unrealizedPnl": "463648.956001",
|
||||
"returnOnEquity": "0.9571980625",
|
||||
"liquidationPx": "64.3045458208",
|
||||
"marginUsed": "329831.767253",
|
||||
"maxLeverage": 10,
|
||||
"cumFunding": {
|
||||
"allTime": "-45793.455728",
|
||||
"sinceOpen": "-45793.450891",
|
||||
"sinceChange": "-1233.875821"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "XRP",
|
||||
"szi": "-39691.0",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 20
|
||||
},
|
||||
"entryPx": "2.468585",
|
||||
"positionValue": "105486.7707",
|
||||
"unrealizedPnl": "-7506.1484",
|
||||
"returnOnEquity": "-1.5321699789",
|
||||
"liquidationPx": "607.2856858464",
|
||||
"marginUsed": "5274.338535",
|
||||
"maxLeverage": 20,
|
||||
"cumFunding": {
|
||||
"allTime": "-2645.400002",
|
||||
"sinceOpen": "-116.036833",
|
||||
"sinceChange": "-116.036833"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "HYPE",
|
||||
"szi": "-750315.16",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 5
|
||||
},
|
||||
"entryPx": "43.3419",
|
||||
"positionValue": "34957933.6195600033",
|
||||
"unrealizedPnl": "-2437823.0249080001",
|
||||
"returnOnEquity": "-0.3748177636",
|
||||
"liquidationPx": "76.3945326684",
|
||||
"marginUsed": "6991586.7239119997",
|
||||
"maxLeverage": 5,
|
||||
"cumFunding": {
|
||||
"allTime": "-1881584.4214250001",
|
||||
"sinceOpen": "-1881584.4214250001",
|
||||
"sinceChange": "-45247.838743"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "FARTCOIN",
|
||||
"szi": "-4122236.7999999998",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 10
|
||||
},
|
||||
"entryPx": "0.80127",
|
||||
"positionValue": "1681584.057824",
|
||||
"unrealizedPnl": "1621478.3279619999",
|
||||
"returnOnEquity": "4.9090151459",
|
||||
"liquidationPx": "6.034656163",
|
||||
"marginUsed": "168158.405782",
|
||||
"maxLeverage": 10,
|
||||
"cumFunding": {
|
||||
"allTime": "-72941.395024",
|
||||
"sinceOpen": "-51271.5204",
|
||||
"sinceChange": "-6504.295598"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "oneWay",
|
||||
"position": {
|
||||
"coin": "PUMP",
|
||||
"szi": "-1921732999.0",
|
||||
"leverage": {
|
||||
"type": "cross",
|
||||
"value": 5
|
||||
},
|
||||
"entryPx": "0.005551",
|
||||
"positionValue": "9176275.0702250004",
|
||||
"unrealizedPnl": "1491738.24016",
|
||||
"returnOnEquity": "0.6991640321",
|
||||
"liquidationPx": "0.0166674064",
|
||||
"marginUsed": "1835255.0140450001",
|
||||
"maxLeverage": 10,
|
||||
"cumFunding": {
|
||||
"allTime": "-196004.534539",
|
||||
"sinceOpen": "-196004.534539",
|
||||
"sinceChange": "-9892.654861"
|
||||
}
|
||||
}
|
||||
}
|
||||
],
|
||||
"time": 1761595358385
|
||||
},
|
||||
"account_value": 30018881.119369,
|
||||
"margin_used": 22969943.984845,
|
||||
"margin_utilization": 0.765183215640378,
|
||||
"available_margin": 7048937.134523999,
|
||||
"total_position_value": 0.0,
|
||||
"portfolio_leverage": 0.0
|
||||
},
|
||||
"open_orders": {
|
||||
"raw_orders": [
|
||||
{
|
||||
"coin": "WLFI",
|
||||
"side": "B",
|
||||
"limitPx": "0.10447",
|
||||
"sz": "2624.0",
|
||||
"oid": 194029229960,
|
||||
"timestamp": 1760131688558,
|
||||
"origSz": "12760.0",
|
||||
"cloid": "0x00000000000000000000001261000016"
|
||||
},
|
||||
{
|
||||
"coin": "@166",
|
||||
"side": "A",
|
||||
"limitPx": "1.01",
|
||||
"sz": "103038.77",
|
||||
"oid": 174787748753,
|
||||
"timestamp": 1758819420037,
|
||||
"origSz": "3000000.0"
|
||||
}
|
||||
]
|
||||
},
|
||||
"account_metrics": {
|
||||
"cumVlm": "2823125892.6900000572",
|
||||
"nRequestsUsed": 1766294,
|
||||
"nRequestsCap": 2823135892
|
||||
}
|
||||
}
|
||||
}
|
||||
7
_data/wallets_to_track.json
Normal file
@ -0,0 +1,7 @@
[
    {
        "name": "Whale 1 (BTC Maxi)",
        "address": "0xb83de012dba672c76a7dbbbf3e459cb59d7d6e36",
        "tags": ["btc", "high_leverage"]
    }
]
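The snapshot in _data/wallets_info.json above mirrors what the Hyperliquid info endpoint returns per address. A minimal sketch of how the entries in wallets_to_track.json could be turned into such a snapshot with the SDK's Info.user_state call follows; file names and the "core_state"/"raw_state" keys come from the diff, but the loop itself is an illustrative assumption (the real fetcher also stores derived metrics such as account_value).

import json
from hyperliquid.info import Info
from hyperliquid.utils import constants

def snapshot_tracked_wallets(in_path="_data/wallets_to_track.json",
                             out_path="_data/wallets_info.json"):
    """Fetch the current perpetuals state for every tracked wallet (illustrative sketch)."""
    with open(in_path, "r", encoding="utf-8") as f:
        wallets = json.load(f)

    info = Info(constants.MAINNET_API_URL, skip_ws=True)
    result = {}
    for wallet in wallets:
        # user_state returns the margin summary, open positions, withdrawable balance, etc.
        result[wallet["name"]] = {
            "address": wallet["address"],
            "core_state": {"raw_state": info.user_state(wallet["address"])},
        }

    with open(out_path, "w", encoding="utf-8") as f:
        json.dump(result, f, indent=4)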
221
address_monitor.py
Normal file
@ -0,0 +1,221 @@
import os
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
import argparse
|
||||
from datetime import datetime, timezone
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
from collections import deque
|
||||
import logging
|
||||
import csv
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
# --- Configuration ---
|
||||
DEFAULT_ADDRESSES_TO_WATCH = [
|
||||
#"0xd4c1f7e8d876c4749228d515473d36f919583d1d",
|
||||
"0x47930c76790c865217472f2ddb4d14c640ee450a",
|
||||
# "0x4d69495d16fab95c3c27b76978affa50301079d0",
|
||||
# "0x09bc1cf4d9f0b59e1425a8fde4d4b1f7d3c9410d",
|
||||
"0xc6ac58a7a63339898aeda32499a8238a46d88e84",
|
||||
"0xa8ef95dbd3db55911d3307930a84b27d6e969526",
|
||||
# "0x4129c62faf652fea61375dcd9ca8ce24b2bb8b95",
|
||||
"0x32885a6adac4375858E6edC092EfDDb0Ef46484C",
|
||||
]
|
||||
MAX_FILLS_TO_DISPLAY = 10
|
||||
LOGS_DIR = "_logs"
|
||||
recent_fills = {}
|
||||
_lines_printed = 0
|
||||
|
||||
TABLE_HEADER = f"{'Time (UTC)':<10} | {'Coin':<6} | {'Side':<5} | {'Size':>15} | {'Price':>15} | {'Value (USD)':>20}"
|
||||
TABLE_WIDTH = len(TABLE_HEADER)
|
||||
|
||||
def log_fill_to_csv(address: str, fill_data: dict):
|
||||
"""Appends a single fill record to the CSV file for a specific address."""
|
||||
log_file_path = os.path.join(LOGS_DIR, f"fills_{address}.csv")
|
||||
file_exists = os.path.exists(log_file_path)
|
||||
|
||||
# The CSV will store a flattened version of the decoded fill
|
||||
csv_row = {
|
||||
'time_utc': fill_data['time'].isoformat(),
|
||||
'coin': fill_data['coin'],
|
||||
'side': fill_data['side'],
|
||||
'price': fill_data['price'],
|
||||
'size': fill_data['size'],
|
||||
'value_usd': fill_data['value']
|
||||
}
|
||||
|
||||
try:
|
||||
with open(log_file_path, 'a', newline='', encoding='utf-8') as f:
|
||||
writer = csv.DictWriter(f, fieldnames=csv_row.keys())
|
||||
if not file_exists:
|
||||
writer.writeheader()
|
||||
writer.writerow(csv_row)
|
||||
except IOError as e:
|
||||
logging.error(f"Failed to write to CSV log for {address}: {e}")
|
||||
|
||||
def on_message(message):
|
||||
"""
|
||||
Callback function to process incoming userEvents from the WebSocket.
|
||||
"""
|
||||
try:
|
||||
logging.debug(f"Received message: {message}")
|
||||
channel = message.get("channel")
|
||||
if channel in ("user", "userFills"):
|
||||
data = message.get("data")
|
||||
if not data:
|
||||
return
|
||||
|
||||
user_address = data.get("user", "").lower()
|
||||
fills = data.get("fills", [])
|
||||
|
||||
if user_address in recent_fills and fills:
|
||||
logging.info(f"Fill detected for user: {user_address}")
|
||||
for fill_data in fills:
|
||||
decoded_fill = {
|
||||
"time": datetime.fromtimestamp(fill_data['time'] / 1000, tz=timezone.utc),
|
||||
"coin": fill_data['coin'],
|
||||
"side": "BUY" if fill_data['side'] == "B" else "SELL",
|
||||
"price": float(fill_data['px']),
|
||||
"size": float(fill_data['sz']),
|
||||
"value": float(fill_data['px']) * float(fill_data['sz']),
|
||||
}
|
||||
recent_fills[user_address].append(decoded_fill)
|
||||
# --- ADDED: Log every fill to its CSV file ---
|
||||
log_fill_to_csv(user_address, decoded_fill)
|
||||
|
||||
except (KeyError, TypeError, ValueError) as e:
|
||||
logging.error(f"Error processing message: {e} | Data: {message}")
|
||||
|
||||
def build_fills_table(address: str, fills: deque) -> list:
|
||||
"""Builds the formatted lines for a single address's fills table."""
|
||||
lines = []
|
||||
short_address = f"{address[:6]}...{address[-4:]}"
|
||||
|
||||
lines.append(f"--- Fills for {short_address} ---")
|
||||
lines.append(TABLE_HEADER)
|
||||
lines.append("-" * TABLE_WIDTH)
|
||||
|
||||
for fill in list(fills):
|
||||
lines.append(
|
||||
f"{fill['time'].strftime('%H:%M:%S'):<10} | "
|
||||
f"{fill['coin']:<6} | "
|
||||
f"{fill['side']:<5} | "
|
||||
f"{fill['size']:>15.4f} | "
|
||||
f"{fill['price']:>15,.2f} | "
|
||||
f"${fill['value']:>18,.2f}"
|
||||
)
|
||||
|
||||
padding_needed = MAX_FILLS_TO_DISPLAY - len(fills)
|
||||
for _ in range(padding_needed):
|
||||
lines.append("")
|
||||
|
||||
return lines
|
||||
|
||||
def display_dashboard():
|
||||
"""
|
||||
Clears the screen and prints a two-column layout of recent fills tables.
|
||||
"""
|
||||
global _lines_printed
|
||||
|
||||
if _lines_printed > 0:
|
||||
print(f"\x1b[{_lines_printed}A", end="")
|
||||
|
||||
output_lines = ["--- Live Address Fill Monitor ---", ""]
|
||||
|
||||
addresses_to_display = list(recent_fills.keys())
|
||||
num_addresses = len(addresses_to_display)
|
||||
mid_point = (num_addresses + 1) // 2
|
||||
left_column_addresses = addresses_to_display[:mid_point]
|
||||
right_column_addresses = addresses_to_display[mid_point:]
|
||||
|
||||
separator = " | "
|
||||
|
||||
for i in range(mid_point):
|
||||
left_address = left_column_addresses[i]
|
||||
left_table_lines = build_fills_table(left_address, recent_fills[left_address])
|
||||
|
||||
right_table_lines = []
|
||||
if i < len(right_column_addresses):
|
||||
right_address = right_column_addresses[i]
|
||||
right_table_lines = build_fills_table(right_address, recent_fills[right_address])
|
||||
|
||||
table_height = 3 + MAX_FILLS_TO_DISPLAY
|
||||
for j in range(table_height):
|
||||
left_part = left_table_lines[j] if j < len(left_table_lines) else ""
|
||||
right_part = right_table_lines[j] if j < len(right_table_lines) else ""
|
||||
output_lines.append(f"{left_part:<{TABLE_WIDTH}}{separator}{right_part}")
|
||||
output_lines.append("")
|
||||
|
||||
final_output = "\n".join(output_lines) + "\n\x1b[J"
|
||||
print(final_output, end="")
|
||||
|
||||
_lines_printed = len(output_lines)
|
||||
sys.stdout.flush()
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main function to set up the WebSocket and run the display loop.
|
||||
"""
|
||||
global recent_fills
|
||||
parser = argparse.ArgumentParser(description="Monitor live fills for specific wallet addresses on Hyperliquid.")
|
||||
parser.add_argument(
|
||||
"--addresses",
|
||||
nargs='+',
|
||||
default=DEFAULT_ADDRESSES_TO_WATCH,
|
||||
help="A space-separated list of Ethereum addresses to monitor."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
setup_logging(args.log_level, 'AddressMonitor')
|
||||
|
||||
# --- ADDED: Ensure the logs directory exists ---
|
||||
if not os.path.exists(LOGS_DIR):
|
||||
os.makedirs(LOGS_DIR)
|
||||
|
||||
addresses_to_watch = []
|
||||
for addr in args.addresses:
|
||||
clean_addr = addr.strip().lower()
|
||||
if len(clean_addr) == 42 and clean_addr.startswith('0x'):
|
||||
addresses_to_watch.append(clean_addr)
|
||||
else:
|
||||
logging.warning(f"Invalid or malformed address provided: '{addr}'. Skipping.")
|
||||
|
||||
recent_fills = {addr: deque(maxlen=MAX_FILLS_TO_DISPLAY) for addr in addresses_to_watch}
|
||||
|
||||
if not addresses_to_watch:
|
||||
print("No valid addresses configured to watch. Exiting.", file=sys.stderr)
|
||||
return
|
||||
|
||||
info = Info(constants.MAINNET_API_URL, skip_ws=False)
|
||||
|
||||
for addr in addresses_to_watch:
|
||||
try:
|
||||
info.subscribe({"type": "userFills", "user": addr}, on_message)
|
||||
logging.debug(f"Queued subscribe for userFills: {addr}")
|
||||
time.sleep(0.02)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to subscribe for {addr}: {e}")
|
||||
|
||||
logging.info(f"Subscribed to userFills for {len(addresses_to_watch)} addresses")
|
||||
|
||||
print("\nDisplaying live fill data... Press Ctrl+C to stop.")
|
||||
try:
|
||||
while True:
|
||||
display_dashboard()
|
||||
time.sleep(0.2)
|
||||
except KeyboardInterrupt:
|
||||
print("\nStopping WebSocket listener...")
|
||||
info.ws_manager.stop()
|
||||
print("Listener stopped.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
22
agents
@ -1,3 +1,19 @@
agent 001
wallet: 0x7773833262f020c7979ec8aae38455c17ba4040c
Private Key: 0x659326d719a4322244d6e7f28e7fa2780f034e9f6a342ef1919664817e6248df
==================================================
SAVE THESE SECURELY. This is what your bot will use.
Name: trade_executor
(Agent has a default long-term validity)
🔑 Agent Private Key: 0xabed7379ec33253694eba50af8a392a88ea32b72b5f4f9cddceb0f5879428b69
🏠 Agent Address: 0xcB262CeAaE5D8A99b713f87a43Dd18E6Be892739
==================================================
SAVE THESE SECURELY. This is what your bot will use.
Name: executor_scalper
(Agent has a default long-term validity)
🔑 Agent Private Key: 0xe7bd4f3a1e29252ec40edff1bf796beaf13993d23a0c288a75d79c53e3c97812
🏠 Agent Address: 0xD211ba67162aD4E785cd4894D00A1A7A32843094
==================================================
SAVE THESE SECURELY. This is what your bot will use.
Name: executor_swing
(Agent has a default long-term validity)
🔑 Agent Private Key: 0xb6811c8b4a928556b3b95ccfaf72eb452b0d89a903f251b86955654672a3b6ab
🏠 Agent Address: 0xAD27c936672Fa368c2d96a47FDA34e8e3A0f318C
==================================================
368
backtester.py
Normal file
@ -0,0 +1,368 @@
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
import json
|
||||
from datetime import datetime, timedelta
|
||||
import itertools
|
||||
import multiprocessing
|
||||
from functools import partial
|
||||
import time
|
||||
import importlib
|
||||
import signal
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
def _run_trade_simulation(df: pd.DataFrame, capital: float, size_pct: float, leverage_long: int, leverage_short: int, taker_fee_pct: float, maker_fee_pct: float) -> tuple[float, list]:
|
||||
"""
|
||||
Simulates a trading strategy with portfolio management, including capital,
|
||||
position sizing, leverage, and fees.
|
||||
"""
|
||||
df.dropna(inplace=True)
|
||||
if df.empty: return capital, []
|
||||
|
||||
df['position_change'] = df['signal'].diff()
|
||||
trades = []
|
||||
entry_price = 0
|
||||
asset_size = 0
|
||||
current_position = 0 # 0=flat, 1=long, -1=short
|
||||
equity = capital
|
||||
|
||||
for i, row in df.iterrows():
|
||||
# --- Close Positions ---
|
||||
if (current_position == 1 and row['signal'] != 1) or \
|
||||
(current_position == -1 and row['signal'] != -1):
|
||||
|
||||
exit_value = asset_size * row['close']
|
||||
fee = exit_value * (taker_fee_pct / 100)
|
||||
|
||||
if current_position == 1: # Closing a long
|
||||
pnl_usd = (row['close'] - entry_price) * asset_size
|
||||
equity += pnl_usd - fee
|
||||
trades.append({'pnl_usd': pnl_usd, 'pnl_pct': (row['close'] - entry_price) / entry_price, 'type': 'long'})
|
||||
|
||||
elif current_position == -1: # Closing a short
|
||||
pnl_usd = (entry_price - row['close']) * asset_size
|
||||
equity += pnl_usd - fee
|
||||
trades.append({'pnl_usd': pnl_usd, 'pnl_pct': (entry_price - row['close']) / entry_price, 'type': 'short'})
|
||||
|
||||
entry_price = 0
|
||||
asset_size = 0
|
||||
current_position = 0
|
||||
|
||||
# --- Open New Positions ---
|
||||
if current_position == 0:
|
||||
if row['signal'] == 1: # Open Long
|
||||
margin_to_use = equity * (size_pct / 100)
|
||||
trade_value = margin_to_use * leverage_long
|
||||
asset_size = trade_value / row['close']
|
||||
fee = trade_value * (taker_fee_pct / 100)
|
||||
equity -= fee
|
||||
entry_price = row['close']
|
||||
current_position = 1
|
||||
elif row['signal'] == -1: # Open Short
|
||||
margin_to_use = equity * (size_pct / 100)
|
||||
trade_value = margin_to_use * leverage_short
|
||||
asset_size = trade_value / row['close']
|
||||
fee = trade_value * (taker_fee_pct / 100)
|
||||
equity -= fee
|
||||
entry_price = row['close']
|
||||
current_position = -1
|
||||
|
||||
return equity, trades
|
||||
|
||||
|
||||
def simulation_worker(params: dict, db_path: str, coin: str, timeframe: str, start_date: str, end_date: str, strategy_class, sim_params: dict) -> tuple[dict, float, list]:
|
||||
"""
|
||||
Worker function that loads data, runs the full simulation, and returns results.
|
||||
"""
|
||||
df = pd.DataFrame()
|
||||
try:
|
||||
with sqlite3.connect(db_path) as conn:
|
||||
query = f'SELECT datetime_utc, open, high, low, close FROM "{coin}_{timeframe}" WHERE datetime_utc >= ? AND datetime_utc <= ? ORDER BY datetime_utc'
|
||||
df = pd.read_sql(query, conn, params=(start_date, end_date), parse_dates=['datetime_utc'])
|
||||
if not df.empty:
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
except Exception as e:
|
||||
print(f"Worker error loading data for params {params}: {e}")
|
||||
return (params, sim_params['capital'], [])
|
||||
|
||||
if df.empty:
|
||||
return (params, sim_params['capital'], [])
|
||||
|
||||
strategy_instance = strategy_class(params)
|
||||
df_with_signals = strategy_instance.calculate_signals(df)
|
||||
|
||||
final_equity, trades = _run_trade_simulation(df_with_signals, **sim_params)
|
||||
return (params, final_equity, trades)
|
||||
|
||||
|
||||
def init_worker():
|
||||
signal.signal(signal.SIGINT, signal.SIG_IGN)
|
||||
|
||||
|
||||
class Backtester:
|
||||
def __init__(self, log_level: str, strategy_name_to_test: str, start_date: str, sim_params: dict):
|
||||
setup_logging(log_level, 'Backtester')
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.simulation_params = sim_params
|
||||
|
||||
self.backtest_config = self._load_backtest_config(strategy_name_to_test)
|
||||
# ... (rest of __init__ is unchanged)
|
||||
self.strategy_name = self.backtest_config.get('strategy_name')
|
||||
self.strategy_config = self._load_strategy_config()
|
||||
self.params = self.strategy_config.get('parameters', {})
|
||||
self.coin = self.params.get('coin')
|
||||
self.timeframe = self.params.get('timeframe')
|
||||
self.pool = None
|
||||
self.full_history_start_date = start_date
|
||||
try:
|
||||
module_path, class_name = self.backtest_config['script'].rsplit('.', 1)
|
||||
module = importlib.import_module(module_path)
|
||||
self.strategy_class = getattr(module, class_name)
|
||||
logging.info(f"Successfully loaded strategy class '{class_name}'.")
|
||||
except (ImportError, AttributeError, KeyError) as e:
|
||||
logging.error(f"Could not load strategy script '{self.backtest_config.get('script')}': {e}")
|
||||
sys.exit(1)
|
||||
|
||||
def _load_backtest_config(self, name_to_test: str):
|
||||
# ... (unchanged)
|
||||
config_path = os.path.join("_data", "backtesting_conf.json")
|
||||
try:
|
||||
with open(config_path, 'r') as f: return json.load(f).get(name_to_test)
|
||||
except (FileNotFoundError, json.JSONDecodeError) as e:
|
||||
logging.error(f"Could not load backtesting configuration: {e}")
|
||||
return None
|
||||
|
||||
def _load_strategy_config(self):
|
||||
# ... (unchanged)
|
||||
config_path = os.path.join("_data", "strategies.json")
|
||||
try:
|
||||
with open(config_path, 'r') as f: return json.load(f).get(self.strategy_name)
|
||||
except (FileNotFoundError, json.JSONDecodeError) as e:
|
||||
logging.error(f"Could not load strategy configuration: {e}")
|
||||
return None
|
||||
|
||||
def run_walk_forward_optimization(self, optimization_weeks: int, testing_weeks: int, step_weeks: int):
|
||||
# ... (unchanged, will now use the new simulation logic via the worker)
|
||||
full_df = self.load_data(self.full_history_start_date, datetime.now().strftime("%Y-%m-%d"))
|
||||
if full_df.empty: return
|
||||
|
||||
optimization_delta = timedelta(weeks=optimization_weeks)
|
||||
testing_delta = timedelta(weeks=testing_weeks)
|
||||
step_delta = timedelta(weeks=step_weeks)
|
||||
|
||||
all_out_of_sample_trades = []
|
||||
all_period_summaries = []
|
||||
|
||||
current_date = full_df.index[0]
|
||||
end_date = full_df.index[-1]
|
||||
|
||||
period_num = 1
|
||||
while current_date + optimization_delta + testing_delta <= end_date:
|
||||
logging.info(f"\n--- Starting Walk-Forward Period {period_num} ---")
|
||||
|
||||
in_sample_start = current_date
|
||||
in_sample_end = in_sample_start + optimization_delta
|
||||
out_of_sample_end = in_sample_end + testing_delta
|
||||
|
||||
in_sample_df = full_df[in_sample_start:in_sample_end]
|
||||
out_of_sample_df = full_df[in_sample_end:out_of_sample_end]
|
||||
|
||||
if in_sample_df.empty or out_of_sample_df.empty:
|
||||
break
|
||||
|
||||
logging.info(f"In-Sample (Optimization): {in_sample_df.index[0].date()} to {in_sample_df.index[-1].date()}")
|
||||
logging.info(f"Out-of-Sample (Testing): {out_of_sample_df.index[0].date()} to {out_of_sample_df.index[-1].date()}")
|
||||
|
||||
best_result = self._find_best_params(in_sample_df)
|
||||
if not best_result:
|
||||
all_period_summaries.append({"period": period_num, "params": "None Found"})
|
||||
current_date += step_delta
|
||||
period_num += 1
|
||||
continue
|
||||
|
||||
print("\n--- [1] In-Sample Optimization Result ---")
|
||||
print(f"Best Parameters Found: {best_result['params']}")
|
||||
self._generate_report(best_result['final_equity'], best_result['trades_list'], "In-Sample Performance with Best Params")
|
||||
|
||||
logging.info(f"\n--- [2] Forward Testing on Out-of-Sample Data ---")
|
||||
df_with_signals = self.strategy_class(best_result['params']).calculate_signals(out_of_sample_df.copy())
|
||||
final_equity_oos, out_of_sample_trades = _run_trade_simulation(df_with_signals, **self.simulation_params)
|
||||
|
||||
all_out_of_sample_trades.extend(out_of_sample_trades)
|
||||
oos_summary = self._generate_report(final_equity_oos, out_of_sample_trades, "Out-of-Sample Performance")
|
||||
|
||||
# Store the summary for the final table
|
||||
summary_to_store = {"period": period_num, "params": best_result['params'], **oos_summary}
|
||||
all_period_summaries.append(summary_to_store)
|
||||
|
||||
current_date += step_delta
|
||||
period_num += 1
|
||||
|
||||
# ... (Final reports will be generated here, but need to adapt to equity tracking)
|
||||
print("\n" + "="*50)
|
||||
# self._generate_report(all_out_of_sample_trades, "FINAL AGGREGATE WALK-FORWARD PERFORMANCE")
|
||||
print("="*50)
|
||||
|
||||
# --- ADDED: Final summary table of best parameters and performance per period ---
|
||||
print("\n--- Summary of Best Parameters and Performance per Period ---")
|
||||
header = f"{'#':<3} | {'Best Parameters':<30} | {'Trades':>8} | {'Longs':>6} | {'Shorts':>7} | {'Win %':>8} | {'L Win %':>9} | {'S Win %':>9} | {'Return %':>10} | {'Equity':>15}"
|
||||
print(header)
|
||||
print("-" * len(header))
|
||||
for item in all_period_summaries:
|
||||
params_str = str(item.get('params', 'N/A'))
|
||||
trades = item.get('num_trades', 'N/A')
|
||||
longs = item.get('num_longs', 'N/A')
|
||||
shorts = item.get('num_shorts', 'N/A')
|
||||
win_rate = f"{item.get('win_rate', 0):.2f}%" if 'win_rate' in item else 'N/A'
|
||||
long_win_rate = f"{item.get('long_win_rate', 0):.2f}%" if 'long_win_rate' in item else 'N/A'
|
||||
short_win_rate = f"{item.get('short_win_rate', 0):.2f}%" if 'short_win_rate' in item else 'N/A'
|
||||
return_pct = f"{item.get('return_pct', 0):.2f}%" if 'return_pct' in item else 'N/A'
|
||||
equity = f"${item.get('final_equity', 0):,.2f}" if 'final_equity' in item else 'N/A'
|
||||
print(f"{item['period']:<3} | {params_str:<30} | {trades:>8} | {longs:>6} | {shorts:>7} | {win_rate:>8} | {long_win_rate:>9} | {short_win_rate:>9} | {return_pct:>10} | {equity:>15}")
|
||||
|
||||
def _find_best_params(self, df: pd.DataFrame) -> dict:
|
||||
param_configs = self.backtest_config.get('optimization_params', {})
|
||||
param_names = list(param_configs.keys())
|
||||
param_ranges = [range(p['start'], p['end'] + 1, p['step']) for p in param_configs.values()]
|
||||
|
||||
all_combinations = list(itertools.product(*param_ranges))
|
||||
param_dicts = [dict(zip(param_names, combo)) for combo in all_combinations]
|
||||
|
||||
logging.info(f"Optimizing on {len(all_combinations)} combinations...")
|
||||
|
||||
num_cores = 60
|
||||
self.pool = multiprocessing.Pool(processes=num_cores, initializer=init_worker)
|
||||
|
||||
worker = partial(
|
||||
simulation_worker,
|
||||
db_path=self.db_path, coin=self.coin, timeframe=self.timeframe,
|
||||
start_date=df.index[0].isoformat(), end_date=df.index[-1].isoformat(),
|
||||
strategy_class=self.strategy_class,
|
||||
sim_params=self.simulation_params
|
||||
)
|
||||
|
||||
all_results = self.pool.map(worker, param_dicts)
|
||||
|
||||
self.pool.close()
|
||||
self.pool.join()
|
||||
self.pool = None
|
||||
|
||||
results = [{'params': params, 'final_equity': final_equity, 'trades_list': trades} for params, final_equity, trades in all_results if trades]
|
||||
if not results: return None
|
||||
return max(results, key=lambda x: x['final_equity'])
|
||||
|
||||
def load_data(self, start_date, end_date):
|
||||
# ... (unchanged)
|
||||
table_name = f"{self.coin}_{self.timeframe}"
|
||||
logging.info(f"Loading full dataset for {table_name}...")
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
query = f'SELECT * FROM "{table_name}" WHERE datetime_utc >= ? AND datetime_utc <= ? ORDER BY datetime_utc'
|
||||
df = pd.read_sql(query, conn, params=(start_date, end_date), parse_dates=['datetime_utc'])
|
||||
if df.empty: return pd.DataFrame()
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
return df
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load data for backtest: {e}")
|
||||
return pd.DataFrame()
|
||||
|
||||
def _generate_report(self, final_equity: float, trades: list, title: str) -> dict:
|
||||
"""Calculates, prints, and returns a detailed performance report."""
|
||||
print(f"\n--- {title} ---")
|
||||
|
||||
initial_capital = self.simulation_params['capital']
|
||||
|
||||
if not trades:
|
||||
print("No trades were executed during this period.")
|
||||
print(f"Final Equity: ${initial_capital:,.2f}")
|
||||
return {"num_trades": 0, "num_longs": 0, "num_shorts": 0, "win_rate": 0, "long_win_rate": 0, "short_win_rate": 0, "return_pct": 0, "final_equity": initial_capital}
|
||||
|
||||
num_trades = len(trades)
|
||||
long_trades = [t for t in trades if t.get('type') == 'long']
|
||||
short_trades = [t for t in trades if t.get('type') == 'short']
|
||||
|
||||
pnls_pct = pd.Series([t['pnl_pct'] for t in trades])
|
||||
|
||||
wins = pnls_pct[pnls_pct > 0]
|
||||
win_rate = (len(wins) / num_trades) * 100 if num_trades > 0 else 0
|
||||
|
||||
long_wins = len([t for t in long_trades if t['pnl_pct'] > 0])
|
||||
short_wins = len([t for t in short_trades if t['pnl_pct'] > 0])
|
||||
long_win_rate = (long_wins / len(long_trades)) * 100 if long_trades else 0
|
||||
short_win_rate = (short_wins / len(short_trades)) * 100 if short_trades else 0
|
||||
|
||||
total_return_pct = ((final_equity - initial_capital) / initial_capital) * 100
|
||||
|
||||
print(f"Final Equity: ${final_equity:,.2f}")
|
||||
print(f"Total Return: {total_return_pct:.2f}%")
|
||||
print(f"Total Trades: {num_trades} (Longs: {len(long_trades)}, Shorts: {len(short_trades)})")
|
||||
print(f"Win Rate (Overall): {win_rate:.2f}%")
|
||||
print(f"Win Rate (Longs): {long_win_rate:.2f}%")
|
||||
print(f"Win Rate (Shorts): {short_win_rate:.2f}%")
|
||||
|
||||
# Return a dictionary of the key metrics for the summary table
|
||||
return {
|
||||
"num_trades": num_trades,
|
||||
"num_longs": len(long_trades),
|
||||
"num_shorts": len(short_trades),
|
||||
"win_rate": win_rate,
|
||||
"long_win_rate": long_win_rate,
|
||||
"short_win_rate": short_win_rate,
|
||||
"return_pct": total_return_pct,
|
||||
"final_equity": final_equity
|
||||
}
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run a Walk-Forward Optimization for a trading strategy.")
|
||||
parser.add_argument("--strategy", required=True, help="The name of the backtest config to run.")
|
||||
parser.add_argument("--start-date", default="2020-08-01", help="The overall start date for historical data.")
|
||||
parser.add_argument("--optimization-weeks", type=int, default=4)
|
||||
parser.add_argument("--testing-weeks", type=int, default=1)
|
||||
parser.add_argument("--step-weeks", type=int, default=1)
|
||||
parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
|
||||
|
||||
parser.add_argument("--capital", type=float, default=1000)
|
||||
parser.add_argument("--size-pct", type=float, default=50)
|
||||
parser.add_argument("--leverage-long", type=int, default=3)
|
||||
parser.add_argument("--leverage-short", type=int, default=2)
|
||||
parser.add_argument("--taker-fee-pct", type=float, default=0.045)
|
||||
parser.add_argument("--maker-fee-pct", type=float, default=0.015)
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
sim_params = {
|
||||
"capital": args.capital,
|
||||
"size_pct": args.size_pct,
|
||||
"leverage_long": args.leverage_long,
|
||||
"leverage_short": args.leverage_short,
|
||||
"taker_fee_pct": args.taker_fee_pct,
|
||||
"maker_fee_pct": args.maker_fee_pct
|
||||
}
|
||||
|
||||
backtester = Backtester(
|
||||
log_level=args.log_level,
|
||||
strategy_name_to_test=args.strategy,
|
||||
start_date=args.start_date,
|
||||
sim_params=sim_params
|
||||
)
|
||||
|
||||
try:
|
||||
backtester.run_walk_forward_optimization(
|
||||
optimization_weeks=args.optimization_weeks,
|
||||
testing_weeks=args.testing_weeks,
|
||||
step_weeks=args.step_weeks
|
||||
)
|
||||
except KeyboardInterrupt:
|
||||
logging.info("\nBacktest optimization cancelled by user.")
|
||||
finally:
|
||||
if backtester.pool:
|
||||
logging.info("Terminating worker processes...")
|
||||
backtester.pool.terminate()
|
||||
backtester.pool.join()
|
||||
logging.info("Worker processes terminated.")
|
||||
|
||||
165
base_strategy.py
Normal file
@ -0,0 +1,165 @@
from abc import ABC, abstractmethod
|
||||
import pandas as pd
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
import sqlite3
|
||||
import multiprocessing
|
||||
import time
|
||||
|
||||
from logging_utils import setup_logging
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
|
||||
class BaseStrategy(ABC):
|
||||
"""
|
||||
An abstract base class that defines the blueprint for all trading strategies.
|
||||
It provides common functionality like loading data, saving status, and state management.
|
||||
"""
|
||||
|
||||
def __init__(self, strategy_name: str, params: dict, trade_signal_queue: multiprocessing.Queue = None, shared_status: dict = None):
|
||||
self.strategy_name = strategy_name
|
||||
self.params = params
|
||||
self.trade_signal_queue = trade_signal_queue
|
||||
# Optional multiprocessing.Manager().dict() to hold live status (avoids file IO)
|
||||
self.shared_status = shared_status
|
||||
|
||||
self.coin = params.get("coin", "N/A")
|
||||
self.timeframe = params.get("timeframe", "N/A")
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
|
||||
|
||||
self.current_signal = "INIT"
|
||||
self.last_signal_change_utc = None
|
||||
self.signal_price = None
|
||||
|
||||
# Note: Logging is set up by the run_strategy function
|
||||
|
||||
def load_data(self) -> pd.DataFrame:
|
||||
"""Loads historical data for the configured coin and timeframe."""
|
||||
table_name = f"{self.coin}_{self.timeframe}"
|
||||
|
||||
periods = [v for k, v in self.params.items() if 'period' in k or '_ma' in k or 'slow' in k or 'fast' in k]
|
||||
limit = max(periods) + 50 if periods else 500
|
||||
|
||||
try:
|
||||
with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
|
||||
query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
|
||||
df = pd.read_sql(query, conn, parse_dates=['datetime_utc'])
|
||||
if df.empty: return pd.DataFrame()
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
df.sort_index(inplace=True)
|
||||
return df
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load data from table '{table_name}': {e}")
|
||||
return pd.DataFrame()
|
||||
|
||||
@abstractmethod
|
||||
def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
|
||||
"""The core logic of the strategy. Must be implemented by child classes."""
|
||||
pass
|
||||
|
||||
def calculate_signals_and_state(self, df: pd.DataFrame) -> bool:
|
||||
"""
|
||||
A wrapper that calls the strategy's signal calculation, determines
|
||||
the last signal change, and returns True if the signal has changed.
|
||||
"""
|
||||
df_with_signals = self.calculate_signals(df)
|
||||
df_with_signals.dropna(inplace=True)
|
||||
if df_with_signals.empty:
|
||||
return False
|
||||
|
||||
df_with_signals['position_change'] = df_with_signals['signal'].diff()
|
||||
|
||||
last_signal_int = df_with_signals['signal'].iloc[-1]
|
||||
new_signal_str = "HOLD"
|
||||
if last_signal_int == 1: new_signal_str = "BUY"
|
||||
elif last_signal_int == -1: new_signal_str = "SELL"
|
||||
|
||||
signal_changed = False
|
||||
if self.current_signal == "INIT":
|
||||
if new_signal_str == "BUY": self.current_signal = "INIT_BUY"
|
||||
elif new_signal_str == "SELL": self.current_signal = "INIT_SELL"
|
||||
else: self.current_signal = "HOLD"
|
||||
signal_changed = True
|
||||
elif new_signal_str != self.current_signal:
|
||||
self.current_signal = new_signal_str
|
||||
signal_changed = True
|
||||
|
||||
if signal_changed:
|
||||
last_change_series = df_with_signals[df_with_signals['position_change'] != 0]
|
||||
if not last_change_series.empty:
|
||||
last_change_row = last_change_series.iloc[-1]
|
||||
self.last_signal_change_utc = last_change_row.name.tz_localize('UTC').isoformat()
|
||||
self.signal_price = last_change_row['close']
|
||||
|
||||
return signal_changed
|
||||
|
||||
def _save_status(self):
|
||||
"""Saves the current strategy state to its JSON file."""
|
||||
status = {
|
||||
"strategy_name": self.strategy_name,
|
||||
"current_signal": self.current_signal,
|
||||
"last_signal_change_utc": self.last_signal_change_utc,
|
||||
"signal_price": self.signal_price,
|
||||
"last_checked_utc": datetime.now(timezone.utc).isoformat()
|
||||
}
|
||||
# If a shared status dict is provided (Manager.dict()), update it instead of writing files
|
||||
try:
|
||||
if self.shared_status is not None:
|
||||
try:
|
||||
# store the status under the strategy name for easy lookup
|
||||
self.shared_status[self.strategy_name] = status
|
||||
except Exception:
|
||||
# Manager proxies may not accept nested mutable objects consistently; assign a copy
|
||||
self.shared_status[self.strategy_name] = dict(status)
|
||||
else:
|
||||
with open(self.status_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(status, f, indent=4)
|
||||
except IOError as e:
|
||||
logging.error(f"Failed to write status file for {self.strategy_name}: {e}")
|
||||
|
||||
def run_polling_loop(self):
|
||||
"""
|
||||
The default execution loop for polling-based strategies (e.g., SMAs).
|
||||
"""
|
||||
while True:
|
||||
df = self.load_data()
|
||||
if df.empty:
|
||||
logging.warning("No data loaded. Waiting 1 minute...")
|
||||
time.sleep(60)
|
||||
continue
|
||||
|
||||
signal_changed = self.calculate_signals_and_state(df.copy())
|
||||
self._save_status()
|
||||
|
||||
if signal_changed or self.current_signal == "INIT_BUY" or self.current_signal == "INIT_SELL":
|
||||
logging.warning(f"New signal detected: {self.current_signal}")
|
||||
self.trade_signal_queue.put({
|
||||
"strategy_name": self.strategy_name,
|
||||
"signal": self.current_signal,
|
||||
"coin": self.coin,
|
||||
"signal_price": self.signal_price,
|
||||
"config": {"agent": self.params.get("agent"), "parameters": self.params}
|
||||
})
|
||||
if self.current_signal == "INIT_BUY": self.current_signal = "BUY"
|
||||
if self.current_signal == "INIT_SELL": self.current_signal = "SELL"
|
||||
|
||||
logging.info(f"Current Signal: {self.current_signal}")
|
||||
time.sleep(60)
|
||||
|
||||
def run_event_loop(self):
|
||||
"""
|
||||
A placeholder for event-driven (WebSocket) strategies.
|
||||
Child classes must override this.
|
||||
"""
|
||||
logging.error("run_event_loop() is not implemented for this strategy.")
|
||||
time.sleep(3600) # Sleep for an hour to prevent rapid error loops
|
||||
|
||||
def on_fill_message(self, message):
|
||||
"""
|
||||
Placeholder for the WebSocket callback.
|
||||
Child classes must override this.
|
||||
"""
|
||||
pass
|
||||
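BaseStrategy leaves only calculate_signals abstract: a child class receives an OHLC DataFrame and must return it with an integer "signal" column (1 = long, -1 = short), which calculate_signals_and_state then diffs into BUY/SELL events. The MaCrossStrategy referenced in strategies.json is not part of this diff, so the following is only an illustrative sketch of what such a subclass could look like, reusing the short_ma/long_ma parameter names from that config.

import pandas as pd

from base_strategy import BaseStrategy


class ExampleMaCrossStrategy(BaseStrategy):
    """Illustrative moving-average cross strategy; not the repository's MaCrossStrategy."""

    def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
        short_ma = self.params.get("short_ma", 7)
        long_ma = self.params.get("long_ma", 44)

        df["short_ma"] = df["close"].rolling(short_ma).mean()
        df["long_ma"] = df["close"].rolling(long_ma).mean()

        # 1 while the short average is above the long average, -1 while it is below
        df["signal"] = 0
        df.loc[df["short_ma"] > df["long_ma"], "signal"] = 1
        df.loc[df["short_ma"] < df["long_ma"], "signal"] = -1
        return df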
31
basic_ws.py
Normal file
@ -0,0 +1,31 @@
import os
import sys
import time
import json
from datetime import datetime, timezone
from hyperliquid.info import Info
from hyperliquid.utils import constants
from collections import deque

# NOTE: example_utils is the helper module shipped alongside the Hyperliquid Python SDK's
# examples; it must be importable for the setup() call below to work.
import example_utils


def main():
    address, info, _ = example_utils.setup(constants.MAINNET_API_URL)
    # An example showing how to subscribe to the different subscription types and print the returned messages.
    # Some subscriptions do not return snapshots, so you will not receive a message until something happens.
    info.subscribe({"type": "allMids"}, print)
    info.subscribe({"type": "l2Book", "coin": "ETH"}, print)
    info.subscribe({"type": "trades", "coin": "PURR/USDC"}, print)
    info.subscribe({"type": "userEvents", "user": address}, print)
    info.subscribe({"type": "userFills", "user": address}, print)
    info.subscribe({"type": "candle", "coin": "ETH", "interval": "1m"}, print)
    info.subscribe({"type": "orderUpdates", "user": address}, print)
    info.subscribe({"type": "userFundings", "user": address}, print)
    info.subscribe({"type": "userNonFundingLedgerUpdates", "user": address}, print)
    info.subscribe({"type": "webData2", "user": address}, print)
    info.subscribe({"type": "bbo", "coin": "ETH"}, print)
    info.subscribe({"type": "activeAssetCtx", "coin": "BTC"}, print)  # Perp
    info.subscribe({"type": "activeAssetCtx", "coin": "@1"}, print)  # Spot
    info.subscribe({"type": "activeAssetData", "user": address, "coin": "BTC"}, print)  # Perp only


if __name__ == "__main__":
    main()
95
coin_id_map.py
Normal file
@ -0,0 +1,95 @@
import os
|
||||
import json
|
||||
import logging
|
||||
import requests
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
def update_coin_mapping():
|
||||
"""
|
||||
Fetches all assets from Hyperliquid and all coins from CoinGecko,
|
||||
then creates and saves a mapping from the Hyperliquid symbol to the
|
||||
CoinGecko ID using a robust matching algorithm.
|
||||
"""
|
||||
setup_logging('normal', 'CoinMapUpdater')
|
||||
logging.info("Starting coin mapping update process...")
|
||||
|
||||
# --- 1. Fetch all assets from Hyperliquid ---
|
||||
try:
|
||||
logging.info("Fetching assets from Hyperliquid...")
|
||||
info = Info(constants.MAINNET_API_URL, skip_ws=True)
|
||||
meta, asset_contexts = info.meta_and_asset_ctxs()
|
||||
hyperliquid_assets = meta['universe']
|
||||
logging.info(f"Found {len(hyperliquid_assets)} assets on Hyperliquid.")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to fetch assets from Hyperliquid: {e}")
|
||||
return
|
||||
|
||||
# --- 2. Fetch all coins from CoinGecko ---
|
||||
try:
|
||||
logging.info("Fetching coin list from CoinGecko...")
|
||||
response = requests.get("https://api.coingecko.com/api/v3/coins/list")
|
||||
response.raise_for_status()
|
||||
coingecko_coins = response.json()
|
||||
|
||||
# Create more robust lookup tables
|
||||
cg_symbol_lookup = {coin['symbol'].upper(): coin['id'] for coin in coingecko_coins}
|
||||
cg_name_lookup = {coin['name'].upper(): coin['id'] for coin in coingecko_coins}
|
||||
|
||||
logging.info(f"Found {len(coingecko_coins)} coins on CoinGecko.")
|
||||
except requests.exceptions.RequestException as e:
|
||||
logging.error(f"Failed to fetch coin list from CoinGecko: {e}")
|
||||
return
|
||||
|
||||
# --- 3. Create the mapping ---
|
||||
final_mapping = {}
|
||||
# Use manual overrides for critical coins where symbols are ambiguous
|
||||
manual_overrides = {
|
||||
"BTC": "bitcoin",
|
||||
"ETH": "ethereum",
|
||||
"SOL": "solana",
|
||||
"BNB": "binancecoin",
|
||||
"HYPE": "hyperliquid",
|
||||
"PUMP": "pump-fun",
|
||||
"ASTER": "astar",
|
||||
"ZEC": "zcash",
|
||||
"SUI": "sui",
|
||||
"ACE": "endurance",
|
||||
# Add other important ones you watch here
|
||||
}
|
||||
|
||||
logging.info("Generating symbol-to-id mapping...")
|
||||
for asset in hyperliquid_assets:
|
||||
asset_symbol = asset['name'].upper()
|
||||
asset_name = asset.get('name', '').upper() # Use full name if available
|
||||
|
||||
# Priority 1: Manual Overrides
|
||||
if asset_symbol in manual_overrides:
|
||||
final_mapping[asset_symbol] = manual_overrides[asset_symbol]
|
||||
continue
|
||||
|
||||
# Priority 2: Exact Name Match
|
||||
if asset_name in cg_name_lookup:
|
||||
final_mapping[asset_symbol] = cg_name_lookup[asset_name]
|
||||
continue
|
||||
|
||||
# Priority 3: Symbol Match
|
||||
if asset_symbol in cg_symbol_lookup:
|
||||
final_mapping[asset_symbol] = cg_symbol_lookup[asset_symbol]
|
||||
else:
|
||||
logging.warning(f"No match found for '{asset_symbol}' on CoinGecko. It will be excluded.")
|
||||
|
||||
# --- 4. Save the mapping to a file ---
|
||||
map_file_path = os.path.join("_data", "coin_id_map.json")
|
||||
try:
|
||||
with open(map_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(final_mapping, f, indent=4, sort_keys=True)
|
||||
logging.info(f"Successfully saved new coin mapping with {len(final_mapping)} entries to '{map_file_path}'.")
|
||||
except IOError as e:
|
||||
logging.error(f"Failed to write coin mapping file: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
update_coin_mapping()
|
||||
|
||||
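With _data/coin_id_map.json in place, per-coin market capitalizations can be pulled from CoinGecko's public /coins/markets endpoint. The fetcher that fills _data/market_cap_data.json is not shown in this diff, so the snippet below is only a rough sketch of the request shape; the endpoint and parameters are standard CoinGecko API, while the function name and the returned dict layout are assumptions.

import json
import requests

def fetch_market_caps(map_path="_data/coin_id_map.json"):
    """Query CoinGecko for the market cap of every mapped coin (illustrative sketch)."""
    with open(map_path, "r", encoding="utf-8") as f:
        coin_map = json.load(f)  # e.g. {"BTC": "bitcoin", "ETH": "ethereum", ...}

    response = requests.get(
        "https://api.coingecko.com/api/v3/coins/markets",
        params={"vs_currency": "usd", "ids": ",".join(coin_map.values()), "per_page": 250},
        timeout=30,
    )
    response.raise_for_status()
    # Map each CoinGecko id to its reported market cap in USD
    return {entry["id"]: entry["market_cap"] for entry in response.json()}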
70
create_agent.py
Normal file
@ -0,0 +1,70 @@
import os
|
||||
from eth_account import Account
|
||||
from hyperliquid.exchange import Exchange
|
||||
from hyperliquid.utils import constants
|
||||
from dotenv import load_dotenv
|
||||
from datetime import datetime, timedelta
|
||||
import json
|
||||
|
||||
# Load environment variables from a .env file if it exists
|
||||
load_dotenv()
|
||||
|
||||
def create_and_authorize_agent():
|
||||
"""
|
||||
Creates and authorizes a new agent key pair using your main wallet,
|
||||
following the correct SDK pattern.
|
||||
"""
|
||||
# --- STEP 1: Load your main wallet ---
|
||||
# This is the wallet that holds the funds and has been activated on Hyperliquid.
|
||||
main_wallet_private_key = os.environ.get("MAIN_WALLET_PRIVATE_KEY")
|
||||
if not main_wallet_private_key:
|
||||
main_wallet_private_key = input("Please enter the private key of your MAIN trading wallet: ")
|
||||
|
||||
try:
|
||||
main_account = Account.from_key(main_wallet_private_key)
|
||||
print(f"\n✅ Loaded main wallet: {main_account.address}")
|
||||
except Exception as e:
|
||||
print(f"❌ Error: Invalid main wallet private key provided. Details: {e}")
|
||||
return
|
||||
|
||||
# --- STEP 2: Initialize the Exchange with your MAIN account ---
|
||||
# This object is used to send the authorization transaction.
|
||||
exchange = Exchange(main_account, constants.MAINNET_API_URL, account_address=main_account.address)
|
||||
|
||||
# --- STEP 3: Create and approve the agent with a specific name ---
|
||||
# agent name must be between 1 and 16 characters long
|
||||
agent_name = "executor_swing"
|
||||
|
||||
print(f"\n🔗 Authorizing a new agent named '{agent_name}'...")
|
||||
try:
|
||||
# --- FIX: Pass only the agent name string to the function ---
|
||||
approve_result, agent_private_key = exchange.approve_agent(agent_name)
|
||||
|
||||
if approve_result.get("status") == "ok":
|
||||
# Derive the agent's public address from the key we received
|
||||
agent_account = Account.from_key(agent_private_key)
|
||||
|
||||
print("\n🎉 SUCCESS! Agent has been authorized on-chain.")
|
||||
print("="*50)
|
||||
print("SAVE THESE SECURELY. This is what your bot will use.")
|
||||
print(f" Name: {agent_name}")
|
||||
print(f" (Agent has a default long-term validity)")
|
||||
print(f"🔑 Agent Private Key: {agent_private_key}")
|
||||
print(f"🏠 Agent Address: {agent_account.address}")
|
||||
print("="*50)
|
||||
print("\nYou can now set this private key as the AGENT_PRIVATE_KEY environment variable.")
|
||||
else:
|
||||
print("\n❌ ERROR: Agent authorization failed.")
|
||||
print(" Response:", approve_result)
|
||||
if "Vault may not perform this action" in str(approve_result):
|
||||
print("\n ACTION REQUIRED: This error means your main wallet (vault) has not been activated. "
|
||||
"Please go to the Hyperliquid website, connect this wallet, and make a deposit to activate it.")
|
||||
|
||||
|
||||
except Exception as e:
|
||||
print(f"\nAn unexpected error occurred during authorization: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
create_and_authorize_agent()
|
||||
|
||||
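Once an agent is authorized, the bot signs orders with the agent's key while the funds and positions stay on the main wallet. A minimal sketch of how the saved key could be wired back in follows; the environment variable names follow the script's own closing hint and the dashboard fetcher, and the Exchange constructor call mirrors the one used above, but the helper itself is illustrative.

import os

from dotenv import load_dotenv
from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.utils import constants

load_dotenv()

def build_agent_exchange() -> Exchange:
    """Create an Exchange client that signs with the agent key on behalf of the main wallet (sketch)."""
    agent_account = Account.from_key(os.environ["AGENT_PRIVATE_KEY"])
    main_wallet_address = os.environ["MAIN_WALLET_ADDRESS"]
    # account_address points at the wallet whose funds and positions the agent is allowed to manage
    return Exchange(agent_account, constants.MAINNET_API_URL, account_address=main_wallet_address)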
136
dashboard_data_fetcher.py
Normal file
@ -0,0 +1,136 @@
import logging
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import argparse # <-- THE FIX: Added this import
|
||||
from datetime import datetime, timezone
|
||||
from eth_account import Account
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
from dotenv import load_dotenv
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
# Load .env file
|
||||
load_dotenv()
|
||||
|
||||
class DashboardDataFetcher:
|
||||
"""
|
||||
A dedicated, lightweight process that runs in a loop to fetch and save
|
||||
the account's state (balances, positions) for the main dashboard to display.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str):
|
||||
setup_logging(log_level, 'DashboardDataFetcher')
|
||||
|
||||
self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
|
||||
if not self.vault_address:
|
||||
logging.error("MAIN_WALLET_ADDRESS not set in .env file. Cannot proceed.")
|
||||
sys.exit(1)
|
||||
|
||||
self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
|
||||
self.status_file_path = os.path.join("_logs", "trade_executor_status.json")
|
||||
self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
|
||||
logging.info(f"Dashboard Data Fetcher initialized for vault: {self.vault_address}")
|
||||
|
||||
def load_managed_positions(self) -> dict:
|
||||
"""Loads the state of which strategy manages which position."""
|
||||
if os.path.exists(self.managed_positions_path):
|
||||
try:
|
||||
with open(self.managed_positions_path, 'r') as f:
|
||||
data = json.load(f)
|
||||
# Create a reverse map: {coin: strategy_name}
|
||||
return {v['coin']: k for k, v in data.items()}
|
||||
except (IOError, json.JSONDecodeError):
|
||||
logging.warning("Could not read managed positions file.")
|
||||
return {}
|
||||
|
||||
def fetch_and_save_status(self):
|
||||
"""Fetches all account data and saves it to the JSON status file."""
|
||||
try:
|
||||
perpetuals_state = self.info.user_state(self.vault_address)
|
||||
spot_state = self.info.spot_user_state(self.vault_address)
|
||||
meta, all_market_contexts = self.info.meta_and_asset_ctxs()
|
||||
coin_to_strategy_map = self.load_managed_positions()
|
||||
|
||||
status = {
|
||||
"last_updated_utc": datetime.now().isoformat(),
|
||||
"perpetuals_account": { "balances": {}, "open_positions": [] },
|
||||
"spot_account": { "positions": [] }
|
||||
}
|
||||
|
||||
# 1. Extract Perpetuals Account Data
|
||||
margin_summary = perpetuals_state.get("marginSummary", {})
|
||||
status["perpetuals_account"]["balances"] = {
|
||||
"account_value": margin_summary.get("accountValue"),
|
||||
"total_margin_used": margin_summary.get("totalMarginUsed"),
|
||||
"withdrawable": margin_summary.get("withdrawable")
|
||||
}
|
||||
|
||||
asset_positions = perpetuals_state.get("assetPositions", [])
|
||||
for asset_pos in asset_positions:
|
||||
pos = asset_pos.get('position', {})
|
||||
if float(pos.get('szi', 0)) != 0:
|
||||
coin = pos.get('coin')
|
||||
position_value = float(pos.get('positionValue', 0))
|
||||
margin_used = float(pos.get('marginUsed', 0))
|
||||
leverage = position_value / margin_used if margin_used > 0 else 0
|
||||
|
||||
position_info = {
|
||||
"coin": coin,
|
||||
"strategy": coin_to_strategy_map.get(coin, "Unmanaged"),
|
||||
"size": pos.get('szi'),
|
||||
"position_value": pos.get('positionValue'),
|
||||
"entry_price": pos.get('entryPx'),
|
||||
"mark_price": pos.get('markPx'),
|
||||
"pnl": pos.get('unrealizedPnl'),
|
||||
"liq_price": pos.get('liquidationPx'),
|
||||
"margin": pos.get('marginUsed'),
|
||||
"funding": pos.get('fundingRate'),
|
||||
"leverage": f"{leverage:.1f}x"
|
||||
}
|
||||
status["perpetuals_account"]["open_positions"].append(position_info)
|
||||
|
||||
# 2. Extract Spot Account Data
|
||||
price_map = { asset.get("universe", {}).get("name"): asset.get("markPx") for asset in all_market_contexts if asset.get("universe", {}).get("name") }
|
||||
spot_balances = spot_state.get("balances", [])
|
||||
for bal in spot_balances:
|
||||
total_balance = float(bal.get('total', 0))
|
||||
if total_balance > 0:
|
||||
coin = bal.get('coin')
|
||||
mark_price = float(price_map.get(coin, 0))
|
||||
status["spot_account"]["positions"].append({
|
||||
"coin": coin, "balance_size": total_balance,
|
||||
"position_value": total_balance * mark_price, "pnl": "N/A"
|
||||
})
|
||||
|
||||
# 3. Write to file
|
||||
# Use atomic write to prevent partial reads from main_app
|
||||
temp_file_path = self.status_file_path + ".tmp"
|
||||
with open(temp_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(status, f, indent=4)
|
||||
# Rename is atomic
|
||||
os.replace(temp_file_path, self.status_file_path)
|
||||
|
||||
logging.debug(f"Successfully updated dashboard status file.")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to fetch or save account status: {e}")
|
||||
|
||||
def run(self):
|
||||
"""Main loop to periodically fetch and save data."""
|
||||
while True:
|
||||
self.fetch_and_save_status()
|
||||
time.sleep(5) # Update dashboard data every 5 seconds
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run the Dashboard Data Fetcher.")
|
||||
parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
|
||||
args = parser.parse_args()
|
||||
|
||||
fetcher = DashboardDataFetcher(log_level=args.log_level)
|
||||
try:
|
||||
fetcher.run()
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Dashboard Data Fetcher stopped.")
|
||||
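The status file above is written with a temp-file-plus-os.replace pattern so the dashboard never reads a half-written JSON document. A minimal standalone sketch of the same idea (the helper name and payload below are illustrative, not part of the repository):

import json
import os
import tempfile

def write_json_atomically(path: str, payload: dict) -> None:
    """Write JSON next to the target file, then atomically swap it into place."""
    directory = os.path.dirname(path) or "."
    fd, tmp_path = tempfile.mkstemp(dir=directory, suffix=".tmp")
    try:
        with os.fdopen(fd, "w", encoding="utf-8") as f:
            json.dump(payload, f, indent=4)
        os.replace(tmp_path, path)  # atomic rename on the same filesystem
    except Exception:
        os.remove(tmp_path)
        raise

if __name__ == "__main__":
    write_json_atomically("status_example.json", {"ok": True})

Readers of the status file therefore only ever see the old or the new complete document, never a partial write.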
56
del_market_cap_tables.py
Normal file
@ -0,0 +1,56 @@
|
||||
import sqlite3
|
||||
import logging
|
||||
import os
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
def cleanup_market_cap_tables():
|
||||
"""
|
||||
Scans the database and drops all tables related to market cap data
|
||||
to allow for a clean refresh.
|
||||
"""
|
||||
setup_logging('normal', 'DBCleanup')
|
||||
db_path = os.path.join("_data", "market_data.db")
|
||||
|
||||
if not os.path.exists(db_path):
|
||||
logging.error(f"Database file not found at '{db_path}'. Nothing to clean.")
|
||||
return
|
||||
|
||||
logging.info(f"Connecting to database at '{db_path}'...")
|
||||
try:
|
||||
with sqlite3.connect(db_path) as conn:
|
||||
cursor = conn.cursor()
|
||||
|
||||
# Find all tables that were created by the market cap fetcher
|
||||
cursor.execute("""
|
||||
SELECT name FROM sqlite_master
|
||||
WHERE type='table'
|
||||
AND (name LIKE '%_market_cap' OR name LIKE 'TOTAL_%')
|
||||
""")
|
||||
|
||||
tables_to_drop = cursor.fetchall()
|
||||
|
||||
if not tables_to_drop:
|
||||
logging.info("No market cap tables found to clean up. Database is already clean.")
|
||||
return
|
||||
|
||||
logging.warning(f"Found {len(tables_to_drop)} market cap tables to remove...")
|
||||
|
||||
for table in tables_to_drop:
|
||||
table_name = table[0]
|
||||
try:
|
||||
logging.info(f"Dropping table: {table_name}...")
|
||||
conn.execute(f'DROP TABLE IF EXISTS "{table_name}"')
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to drop table {table_name}: {e}")
|
||||
|
||||
conn.commit()
|
||||
logging.info("--- Database cleanup complete ---")
|
||||
|
||||
except sqlite3.Error as e:
|
||||
logging.error(f"A database error occurred: {e}")
|
||||
except Exception as e:
|
||||
logging.error(f"An unexpected error occurred: {e}")
|
||||
|
||||
if __name__ == "__main__":
|
||||
cleanup_market_cap_tables()
|
||||
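Because the cleanup above is destructive, it can help to preview the matching tables first. A small dry-run sketch that reuses the same sqlite_master patterns (the helper itself is illustrative; the database path mirrors the script):

import sqlite3

def list_market_cap_tables(db_path: str = "_data/market_data.db") -> list:
    """Return the table names the cleanup script would drop, without dropping them."""
    with sqlite3.connect(db_path) as conn:
        rows = conn.execute(
            "SELECT name FROM sqlite_master "
            "WHERE type='table' AND (name LIKE '%_market_cap' OR name LIKE 'TOTAL_%')"
        ).fetchall()
    return [name for (name,) in rows]

if __name__ == "__main__":
    print(list_market_cap_tables())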
118
fix_timestamps.py
Normal file
@ -0,0 +1,118 @@
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
# script to fix missing millisecond timestamps in the database after import from CSVs (this is already fixed in import_csv.py)
|
||||
# Assuming logging_utils.py is in the same directory
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class DatabaseFixer:
|
||||
"""
|
||||
Scans the SQLite database for rows with missing millisecond timestamps
|
||||
and updates them based on the datetime_utc column.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str, coin: str):
|
||||
setup_logging(log_level, 'TimestampFixer')
|
||||
self.coin = coin
|
||||
self.table_name = f"{self.coin}_1m"
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
|
||||
def run(self):
|
||||
"""Orchestrates the entire database update and verification process."""
|
||||
logging.info(f"Starting timestamp fix process for table '{self.table_name}'...")
|
||||
|
||||
if not os.path.exists(self.db_path):
|
||||
logging.error(f"Database file not found at '{self.db_path}'. Exiting.")
|
||||
sys.exit(1)
|
||||
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute("PRAGMA journal_mode=WAL;")
|
||||
|
||||
# 1. Check how many rows need fixing
|
||||
rows_to_fix_count = self._count_rows_to_fix(conn)
|
||||
if rows_to_fix_count == 0:
|
||||
logging.info(f"No rows with missing timestamps found in '{self.table_name}'. No action needed.")
|
||||
return
|
||||
|
||||
logging.info(f"Found {rows_to_fix_count:,} rows with missing timestamps to update.")
|
||||
|
||||
# 2. Process the table in chunks to conserve memory
|
||||
updated_count = self._process_in_chunks(conn)
|
||||
|
||||
# 3. Provide a final summary
|
||||
self._summarize_update(rows_to_fix_count, updated_count)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"A critical error occurred: {e}")
|
||||
|
||||
def _count_rows_to_fix(self, conn) -> int:
|
||||
"""Counts the number of rows where timestamp_ms is NULL."""
|
||||
try:
|
||||
return pd.read_sql(f'SELECT COUNT(*) FROM "{self.table_name}" WHERE timestamp_ms IS NULL', conn).iloc[0, 0]
|
||||
except pd.io.sql.DatabaseError:
|
||||
logging.error(f"Table '{self.table_name}' not found in the database. Cannot fix timestamps.")
|
||||
sys.exit(1)
|
||||
|
||||
def _process_in_chunks(self, conn) -> int:
|
||||
"""Reads, calculates, and updates timestamps in manageable chunks."""
|
||||
total_updated = 0
|
||||
chunk_size = 50000 # Process 50,000 rows at a time
|
||||
|
||||
# We select the special 'rowid' column to uniquely identify each row for updating
|
||||
query = f'SELECT rowid, datetime_utc FROM "{self.table_name}" WHERE timestamp_ms IS NULL'
|
||||
|
||||
for chunk_df in pd.read_sql_query(query, conn, chunksize=chunk_size):
|
||||
if chunk_df.empty:
|
||||
break
|
||||
|
||||
logging.info(f"Processing a chunk of {len(chunk_df)} rows...")
|
||||
|
||||
# Calculate the missing timestamps
|
||||
chunk_df['datetime_utc'] = pd.to_datetime(chunk_df['datetime_utc'])
|
||||
chunk_df['timestamp_ms'] = (chunk_df['datetime_utc'].astype('int64') // 10**6)
|
||||
|
||||
# Prepare data for the update command: a list of (timestamp, rowid) tuples
|
||||
update_data = list(zip(chunk_df['timestamp_ms'], chunk_df['rowid']))
|
||||
|
||||
# Use executemany for a fast bulk update
|
||||
cursor = conn.cursor()
|
||||
cursor.executemany(f'UPDATE "{self.table_name}" SET timestamp_ms = ? WHERE rowid = ?', update_data)
|
||||
conn.commit()
|
||||
|
||||
total_updated += len(chunk_df)
|
||||
logging.info(f"Updated {total_updated} rows so far...")
|
||||
|
||||
return total_updated
|
||||
|
||||
def _summarize_update(self, expected_count: int, actual_count: int):
|
||||
"""Prints a final summary of the update process."""
|
||||
logging.info("--- Timestamp Fix Summary ---")
|
||||
print(f"\n{'Status':<25}: COMPLETE")
|
||||
print("-" * 40)
|
||||
print(f"{'Table Processed':<25}: {self.table_name}")
|
||||
print(f"{'Rows Needing Update':<25}: {expected_count:,}")
|
||||
print(f"{'Rows Successfully Updated':<25}: {actual_count:,}")
|
||||
|
||||
if expected_count == actual_count:
|
||||
logging.info("Verification successful: All necessary rows have been updated.")
|
||||
else:
|
||||
logging.warning("Verification warning: The number of updated rows does not match the expected count.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Fix missing millisecond timestamps in the SQLite database.")
|
||||
parser.add_argument("--coin", default="BTC", help="The coin symbol for the table to fix (e.g., BTC).")
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
fixer = DatabaseFixer(log_level=args.log_level, coin=args.coin)
|
||||
fixer.run()
|
||||
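The heart of the fixer is the conversion from stored datetime_utc strings to millisecond epoch timestamps: parse to datetime64[ns], reinterpret as int64 nanoseconds, then integer-divide by 10**6. A tiny sketch of just that transformation on in-memory toy data:

import pandas as pd

# Rows shaped like the 1m candle table's datetime_utc column (toy data).
df = pd.DataFrame({"datetime_utc": ["2024-01-01 00:00:00", "2024-01-01 00:01:00"]})
df["datetime_utc"] = pd.to_datetime(df["datetime_utc"])

# datetime64[ns] -> int64 nanoseconds -> milliseconds, matching the fixer's formula.
df["timestamp_ms"] = df["datetime_utc"].astype("int64") // 10**6

print(df)  # 2024-01-01 00:00:00 -> 1704067200000

The chunked read plus cursor.executemany keeps memory bounded while still issuing fast bulk UPDATEs.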
154
import_csv.py
Normal file
@ -0,0 +1,154 @@
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
from datetime import datetime
|
||||
|
||||
# Assuming logging_utils.py is in the same directory
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class CsvImporter:
|
||||
"""
|
||||
Imports historical candle data from a large CSV file into the SQLite database,
|
||||
intelligently adding only the missing data.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str, csv_path: str, coin: str):
|
||||
setup_logging(log_level, 'CsvImporter')
|
||||
if not os.path.exists(csv_path):
|
||||
logging.error(f"CSV file not found at '{csv_path}'. Please check the path.")
|
||||
sys.exit(1)
|
||||
|
||||
self.csv_path = csv_path
|
||||
self.coin = coin
|
||||
# --- FIX: Corrected the f-string syntax for the table name ---
|
||||
self.table_name = f"{self.coin}_1m"
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.column_mapping = {
|
||||
'Open time': 'datetime_utc',
|
||||
'Open': 'open',
|
||||
'High': 'high',
|
||||
'Low': 'low',
|
||||
'Close': 'close',
|
||||
'Volume': 'volume',
|
||||
'Number of trades': 'number_of_trades'
|
||||
}
|
||||
|
||||
def run(self):
|
||||
"""Orchestrates the entire import and verification process."""
|
||||
logging.info(f"Starting import process for '{self.coin}' from '{self.csv_path}'...")
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute("PRAGMA journal_mode=WAL;")
|
||||
|
||||
# 1. Get the current state of the database
|
||||
db_oldest, db_newest, initial_row_count = self._get_db_state(conn)
|
||||
|
||||
# 2. Read, clean, and filter the CSV data
|
||||
new_data_df = self._process_and_filter_csv(db_oldest, db_newest)
|
||||
|
||||
if new_data_df.empty:
|
||||
logging.info("No new data to import. Database is already up-to-date with the CSV file.")
|
||||
return
|
||||
|
||||
# 3. Append the new data to the database
|
||||
self._append_to_db(new_data_df, conn)
|
||||
|
||||
# 4. Summarize and verify the import
|
||||
self._summarize_import(initial_row_count, len(new_data_df), conn)
|
||||
|
||||
def _get_db_state(self, conn) -> tuple:  # returns (oldest_dt, newest_dt, row_count)
|
||||
"""Gets the oldest and newest timestamps and total row count from the DB table."""
|
||||
try:
|
||||
oldest = pd.read_sql(f'SELECT MIN(datetime_utc) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
newest = pd.read_sql(f'SELECT MAX(datetime_utc) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
count = pd.read_sql(f'SELECT COUNT(*) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
|
||||
oldest_dt = pd.to_datetime(oldest) if oldest else None
|
||||
newest_dt = pd.to_datetime(newest) if newest else None
|
||||
|
||||
if oldest_dt:
|
||||
logging.info(f"Database contains data from {oldest_dt} to {newest_dt}.")
|
||||
else:
|
||||
logging.info("Database table is empty. A full import will be performed.")
|
||||
|
||||
return oldest_dt, newest_dt, count
|
||||
except pd.io.sql.DatabaseError:
|
||||
logging.info(f"Table '{self.table_name}' not found. It will be created.")
|
||||
return None, None, 0
|
||||
|
||||
def _process_and_filter_csv(self, db_oldest: datetime, db_newest: datetime) -> pd.DataFrame:
|
||||
"""Reads the CSV and returns a DataFrame of only the missing data."""
|
||||
logging.info("Reading and processing CSV file. This may take a moment for large files...")
|
||||
df = pd.read_csv(self.csv_path, usecols=self.column_mapping.keys())
|
||||
|
||||
# Clean and format the data
|
||||
df.rename(columns=self.column_mapping, inplace=True)
|
||||
df['datetime_utc'] = pd.to_datetime(df['datetime_utc'])
|
||||
|
||||
# --- FIX: Calculate the millisecond timestamp from the datetime column ---
|
||||
# This converts the datetime to nanoseconds and then to milliseconds.
|
||||
df['timestamp_ms'] = (df['datetime_utc'].astype('int64') // 10**6)
|
||||
|
||||
# Filter the data to find only rows that are outside the range of what's already in the DB
|
||||
if db_oldest and db_newest:
|
||||
# Get data from before the oldest record and after the newest record
|
||||
df_filtered = df[(df['datetime_utc'] < db_oldest) | (df['datetime_utc'] > db_newest)]
|
||||
else:
|
||||
# If the DB is empty, all data is new
|
||||
df_filtered = df
|
||||
|
||||
logging.info(f"Found {len(df_filtered):,} new rows to import.")
|
||||
return df_filtered
|
||||
|
||||
def _append_to_db(self, df: pd.DataFrame, conn):
|
||||
"""Appends the DataFrame to the SQLite table."""
|
||||
logging.info(f"Appending {len(df):,} new rows to the database...")
|
||||
df.to_sql(self.table_name, conn, if_exists='append', index=False)
|
||||
logging.info("Append operation complete.")
|
||||
|
||||
def _summarize_import(self, initial_count: int, added_count: int, conn):
|
||||
"""Prints a final summary and verification of the import."""
|
||||
logging.info("--- Import Summary & Verification ---")
|
||||
|
||||
try:
|
||||
final_count = pd.read_sql(f'SELECT COUNT(*) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
new_oldest = pd.read_sql(f'SELECT MIN(datetime_utc) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
new_newest = pd.read_sql(f'SELECT MAX(datetime_utc) FROM "{self.table_name}"', conn).iloc[0, 0]
|
||||
|
||||
print(f"\n{'Status':<20}: SUCCESS")
|
||||
print("-" * 40)
|
||||
print(f"{'Initial Row Count':<20}: {initial_count:,}")
|
||||
print(f"{'Rows Added':<20}: {added_count:,}")
|
||||
print(f"{'Final Row Count':<20}: {final_count:,}")
|
||||
print("-" * 40)
|
||||
print(f"{'New Oldest Record':<20}: {new_oldest}")
|
||||
print(f"{'New Newest Record':<20}: {new_newest}")
|
||||
|
||||
# Verification check
|
||||
if final_count == initial_count + added_count:
|
||||
logging.info("Verification successful: Final row count matches expected count.")
|
||||
else:
|
||||
logging.warning("Verification warning: Final row count does not match expected count.")
|
||||
except Exception as e:
|
||||
logging.error(f"Could not generate summary. Error: {e}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Import historical CSV data into the SQLite database.")
|
||||
parser.add_argument("--file", required=True, help="Path to the large CSV file to import.")
|
||||
parser.add_argument("--coin", default="BTC", help="The coin symbol for this data (e.g., BTC).")
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
importer = CsvImporter(log_level=args.log_level, csv_path=args.file, coin=args.coin)
|
||||
importer.run()
|
||||
|
||||
|
||||
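The importer's filtering step only keeps CSV rows that fall outside the [oldest, newest] range already stored in the table; anything inside that range is assumed to be present and is skipped, so interior gaps would not be filled. A compact sketch of that filter on toy data, with variable names mirroring the script:

import pandas as pd

df = pd.DataFrame({"datetime_utc": pd.to_datetime(
    ["2024-01-01", "2024-01-02", "2024-01-03", "2024-01-04", "2024-01-05"])})

db_oldest = pd.Timestamp("2024-01-02")
db_newest = pd.Timestamp("2024-01-04")

# Keep only rows strictly before the DB's oldest record or after its newest record.
df_filtered = df[(df["datetime_utc"] < db_oldest) | (df["datetime_utc"] > db_newest)]
print(df_filtered)  # 2024-01-01 and 2024-01-05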
238
live_candle_fetcher.py
Normal file
@ -0,0 +1,238 @@
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
import sqlite3
|
||||
from queue import Queue
|
||||
from threading import Thread
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class LiveCandleFetcher:
|
||||
"""
|
||||
Connects to Hyperliquid to maintain a complete and up-to-date database of
|
||||
1-minute candles using a robust producer-consumer architecture to prevent
|
||||
data corruption and duplication.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str, coins: list):
|
||||
setup_logging(log_level, 'LiveCandleFetcher')
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.coins_to_watch = set(coins)
|
||||
if not self.coins_to_watch:
|
||||
logging.error("No coins provided to watch. Exiting.")
|
||||
sys.exit(1)
|
||||
|
||||
self.info = Info(constants.MAINNET_API_URL, skip_ws=False)
|
||||
self.candle_queue = Queue() # Thread-safe queue for candles
|
||||
self._ensure_tables_exist()
|
||||
|
||||
def _ensure_tables_exist(self):
|
||||
"""
|
||||
Ensures that all necessary tables are created with the correct schema and PRIMARY KEY.
|
||||
If a table exists with an incorrect schema, it attempts to migrate the data.
|
||||
"""
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
for coin in self.coins_to_watch:
|
||||
table_name = f"{coin}_1m"
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"PRAGMA table_info('{table_name}')")
|
||||
columns = cursor.fetchall()
|
||||
|
||||
if columns:
|
||||
pk_found = any(col[1] == 'timestamp_ms' and col[5] == 1 for col in columns)
|
||||
if not pk_found:
|
||||
logging.warning(f"Schema migration needed for table '{table_name}': 'timestamp_ms' is not the PRIMARY KEY.")
|
||||
logging.warning("Attempting to automatically rebuild the table...")
|
||||
try:
|
||||
# 1. Rename old table
|
||||
conn.execute(f'ALTER TABLE "{table_name}" RENAME TO "{table_name}_old"')
|
||||
logging.info(f" -> Renamed existing table to '{table_name}_old'.")
|
||||
|
||||
# 2. Create new table with correct schema
|
||||
self._create_candle_table(conn, table_name)
|
||||
logging.info(f" -> Created new '{table_name}' table with correct schema.")
|
||||
|
||||
# 3. Copy unique data from old table to new table
|
||||
conn.execute(f'''
|
||||
INSERT OR IGNORE INTO "{table_name}" (datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades)
|
||||
SELECT datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades
|
||||
FROM "{table_name}_old"
|
||||
''')
|
||||
conn.commit()
|
||||
logging.info(" -> Copied data to new table.")
|
||||
|
||||
# 4. Drop the old table
|
||||
conn.execute(f'DROP TABLE "{table_name}_old"')
|
||||
logging.info(f" -> Removed old table. Migration for '{table_name}' complete.")
|
||||
except Exception as e:
|
||||
logging.error(f"FATAL: Automatic schema migration for '{table_name}' failed: {e}")
|
||||
logging.error("Please delete the database file '_data/market_data.db' manually and restart.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
# If table does not exist, create it
|
||||
self._create_candle_table(conn, table_name)
|
||||
logging.info("Database tables verified.")
|
||||
|
||||
def _create_candle_table(self, conn, table_name: str):
|
||||
"""Creates a new candle table with the correct schema."""
|
||||
conn.execute(f'''
|
||||
CREATE TABLE "{table_name}" (
|
||||
datetime_utc TEXT,
|
||||
timestamp_ms INTEGER PRIMARY KEY,
|
||||
open REAL,
|
||||
high REAL,
|
||||
low REAL,
|
||||
close REAL,
|
||||
volume REAL,
|
||||
number_of_trades INTEGER
|
||||
)
|
||||
''')
|
||||
|
||||
def on_message(self, message):
|
||||
"""
|
||||
Callback function to process incoming candle messages. This is the "Producer".
|
||||
It puts the raw message onto the queue for the DB writer.
|
||||
"""
|
||||
try:
|
||||
if message.get("channel") == "candle":
|
||||
candle_data = message.get("data", {})
|
||||
if candle_data:
|
||||
self.candle_queue.put(candle_data)
|
||||
except Exception as e:
|
||||
logging.error(f"Error in on_message: {e}")
|
||||
|
||||
def _database_writer_thread(self):
|
||||
"""
|
||||
This is the "Consumer" thread. It runs forever, pulling candles from the
|
||||
queue and writing them to the database, ensuring all writes are serial.
|
||||
"""
|
||||
while True:
|
||||
try:
|
||||
candle = self.candle_queue.get()
|
||||
if candle is None: # A signal to stop the thread
|
||||
break
|
||||
|
||||
coin = candle.get('coin')
|
||||
if not coin:
|
||||
continue
|
||||
|
||||
table_name = f"{coin}_1m"
|
||||
record = (
|
||||
datetime.fromtimestamp(candle['t'] / 1000, tz=timezone.utc).strftime('%Y-%m-%d %H:%M:%S'),
|
||||
candle['t'],
|
||||
candle.get('o'), candle.get('h'), candle.get('l'), candle.get('c'),
|
||||
candle.get('v'), candle.get('n')
|
||||
)
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute(f'''
|
||||
INSERT OR REPLACE INTO "{table_name}" (datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
''', record)
|
||||
conn.commit()
|
||||
logging.debug(f"Upserted candle for {coin} at {record[0]}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error in database writer thread: {e}")
|
||||
|
||||
def _get_last_timestamp_from_db(self, coin: str) -> int:
|
||||
"""Gets the most recent millisecond timestamp from a coin's 1m table."""
|
||||
table_name = f"{coin}_1m"
|
||||
try:
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
result = conn.execute(f'SELECT MAX(timestamp_ms) FROM "{table_name}"').fetchone()
|
||||
return int(result[0]) if result and result[0] is not None else None
|
||||
except Exception as e:
|
||||
logging.error(f"Could not read last timestamp from table '{table_name}': {e}")
|
||||
return None
|
||||
|
||||
def _fetch_historical_candles(self, coin: str, start_ms: int, end_ms: int):
|
||||
"""Fetches historical candles and puts them on the queue for the writer."""
|
||||
logging.info(f"Fetching historical data for {coin} from {datetime.fromtimestamp(start_ms/1000)}...")
|
||||
current_start = start_ms
|
||||
|
||||
while current_start < end_ms:
|
||||
try:
|
||||
http_info = Info(constants.MAINNET_API_URL, skip_ws=True)
|
||||
batch = http_info.candles_snapshot(coin, "1m", current_start, end_ms)
|
||||
if not batch:
|
||||
break
|
||||
|
||||
for candle in batch:
|
||||
candle['coin'] = coin
|
||||
self.candle_queue.put(candle)
|
||||
|
||||
last_ts = batch[-1]['t']
|
||||
if last_ts < current_start:
|
||||
break
|
||||
current_start = last_ts + 1
|
||||
time.sleep(0.5)
|
||||
except Exception as e:
|
||||
logging.error(f"Error fetching historical chunk for {coin}: {e}")
|
||||
break
|
||||
|
||||
logging.info(f"Historical data fetching for {coin} is complete.")
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Starts the database writer, catches up on historical data, then
|
||||
subscribes to the WebSocket for live updates.
|
||||
"""
|
||||
db_writer = Thread(target=self._database_writer_thread, daemon=True)
|
||||
db_writer.start()
|
||||
|
||||
logging.info("--- Starting Historical Data Catch-Up Phase ---")
|
||||
now_ms = int(time.time() * 1000)
|
||||
for coin in self.coins_to_watch:
|
||||
last_ts = self._get_last_timestamp_from_db(coin)
|
||||
start_ts = last_ts + 60000 if last_ts else now_ms - (7 * 24 * 60 * 60 * 1000)
|
||||
if start_ts < now_ms:
|
||||
self._fetch_historical_candles(coin, start_ts, now_ms)
|
||||
|
||||
logging.info("--- Historical Catch-Up Complete. Starting Live WebSocket Feed ---")
|
||||
for coin in self.coins_to_watch:
|
||||
# --- FIX: Use a lambda to create a unique callback for each subscription ---
|
||||
# This captures the 'coin' variable and adds it to the message data.
|
||||
callback = lambda msg, c=coin: self.on_message({**msg, 'data': {**msg.get('data',{}), 'coin': c}})
|
||||
subscription = {"type": "candle", "coin": coin, "interval": "1m"}
|
||||
self.info.subscribe(subscription, callback)
|
||||
logging.info(f"Subscribed to 1m candles for {coin}")
|
||||
time.sleep(0.2)
|
||||
|
||||
print("\nListening for live candle data... Press Ctrl+C to stop.")
|
||||
try:
|
||||
while True:
|
||||
time.sleep(1)
|
||||
except KeyboardInterrupt:
|
||||
print("\nStopping WebSocket listener...")
|
||||
self.info.ws_manager.stop()
|
||||
self.candle_queue.put(None)
|
||||
db_writer.join()
|
||||
print("Listener stopped.")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="A hybrid historical and live candle data fetcher for Hyperliquid.")
|
||||
parser.add_argument(
|
||||
"--coins",
|
||||
nargs='+',
|
||||
required=True,
|
||||
help="List of coin symbols to fetch (e.g., BTC ETH)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
fetcher = LiveCandleFetcher(log_level=args.log_level, coins=args.coins)
|
||||
fetcher.run()
|
||||
|
||||
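Every candle, historical or live, is funneled through one queue so that a single thread performs all SQLite writes, and the None sentinel is what lets that writer exit cleanly. A stripped-down sketch of the producer-consumer shape used above, with the exchange and database replaced by stand-ins:

from queue import Queue
from threading import Thread

candle_queue = Queue()

def database_writer():
    """Consumer: drain items serially until the None sentinel arrives."""
    while True:
        item = candle_queue.get()
        if item is None:  # sentinel -> shut down
            break
        print(f"would write to db: {item}")

writer = Thread(target=database_writer, daemon=True)
writer.start()

# Producers (WebSocket callback, historical back-fill) simply enqueue work.
for candle in ({"coin": "BTC", "t": 1}, {"coin": "ETH", "t": 2}):
    candle_queue.put(candle)

candle_queue.put(None)  # ask the writer to stop
writer.join()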
258
live_market.py
Normal file
@ -0,0 +1,258 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
import argparse
|
||||
from datetime import datetime, timedelta, timezone
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
from collections import deque, defaultdict
|
||||
|
||||
# --- Configuration ---
|
||||
MAX_TRADE_HISTORY = 100000
|
||||
all_trades = {
|
||||
"BTC": deque(maxlen=MAX_TRADE_HISTORY),
|
||||
"ETH": deque(maxlen=MAX_TRADE_HISTORY),
|
||||
}
|
||||
latest_raw_trades = {
|
||||
"BTC": None,
|
||||
"ETH": None,
|
||||
}
|
||||
decoded_trade_output = []
|
||||
_lines_printed = 0
|
||||
|
||||
def get_coins_from_strategies() -> set:
|
||||
"""
|
||||
Reads the strategies.json file and returns a unique set of coin symbols
|
||||
from all enabled strategies.
|
||||
"""
|
||||
coins = set()
|
||||
config_path = os.path.join("_data", "strategies.json")
|
||||
try:
|
||||
with open(config_path, 'r') as f:
|
||||
all_configs = json.load(f)
|
||||
for name, config in all_configs.items():
|
||||
if config.get("enabled", False):
|
||||
coin = config.get("parameters", {}).get("coin")
|
||||
if coin:
|
||||
coins.add(coin)
|
||||
print(f"Found {len(coins)} unique coins to watch from enabled strategies: {list(coins)}")
|
||||
return coins
|
||||
except (FileNotFoundError, json.JSONDecodeError) as e:
|
||||
print(f"ERROR: Could not load or parse '{config_path}': {e}", file=sys.stderr)
|
||||
return set()
|
||||
|
||||
def on_message(message):
|
||||
"""
|
||||
Callback function to process incoming trades from the WebSocket and store them.
|
||||
"""
|
||||
try:
|
||||
if message.get("channel") == "trades":
|
||||
for trade in message["data"]:
|
||||
coin = trade['coin']
|
||||
if coin in all_trades:
|
||||
latest_raw_trades[coin] = trade
|
||||
price = float(trade['px'])
|
||||
size = float(trade['sz'])
|
||||
decoded_trade = {
|
||||
"time": datetime.fromtimestamp(trade['time'] / 1000, tz=timezone.utc),
|
||||
"side": "BUY" if trade['side'] == "B" else "SELL",
|
||||
"value": price * size,
|
||||
"users": trade.get('users', [])
|
||||
}
|
||||
all_trades[coin].append(decoded_trade)
|
||||
except (KeyError, TypeError, ValueError):
|
||||
pass
|
||||
|
||||
def build_top_trades_table(title: str, trades: list) -> list:
|
||||
"""Builds the formatted lines for a top-5 trades by value table."""
|
||||
lines = []
|
||||
header = f"{'Time (UTC)':<10} | {'Side':<5} | {'Value (USD)':>20}"
|
||||
lines.append(f"--- {title} ---")
|
||||
lines.append(header)
|
||||
lines.append("-" * len(header))
|
||||
|
||||
top_trades = sorted(trades, key=lambda x: x['value'], reverse=True)[:5]
|
||||
|
||||
for trade in top_trades:
|
||||
lines.append(
|
||||
f"{trade['time'].strftime('%H:%M:%S'):<10} | "
|
||||
f"{trade['side']:<5} | "
|
||||
f"${trade['value']:>18,.2f}"
|
||||
)
|
||||
while len(lines) < 8: lines.append(" " * len(header))
|
||||
return lines
|
||||
|
||||
def build_top_takers_table(title: str, trades: list) -> list:
|
||||
"""Analyzes a list of trades to find the top 5 takers by total volume."""
|
||||
lines = []
|
||||
header = f"{'#':<2} | {'Taker Address':<15} | {'Total Volume (USD)':>20}"
|
||||
lines.append(f"--- {title} ---")
|
||||
lines.append(header)
|
||||
lines.append("-" * len(header))
|
||||
|
||||
volumes = defaultdict(float)
|
||||
for trade in trades:
|
||||
for user in trade['users']:
|
||||
volumes[user] += trade['value']
|
||||
|
||||
top_takers = sorted(volumes.items(), key=lambda item: item[1], reverse=True)[:5]
|
||||
|
||||
for i, (address, volume) in enumerate(top_takers, 1):
|
||||
short_address = f"{address[:6]}...{address[-4:]}"
|
||||
lines.append(f"{i:<2} | {short_address:<15} | ${volume:>18,.2f}")
|
||||
|
||||
while len(lines) < 8: lines.append(" " * len(header))
|
||||
return lines
|
||||
|
||||
def build_top_active_takers_table(title: str, trades: list) -> list:
|
||||
"""Analyzes a list of trades to find the top 5 takers by trade count."""
|
||||
lines = []
|
||||
header = f"{'#':<2} | {'Taker Address':<42} | {'Trade Count':>12} | {'Total Volume (USD)':>20}"
|
||||
lines.append(f"--- {title} ---")
|
||||
lines.append(header)
|
||||
lines.append("-" * len(header))
|
||||
|
||||
taker_data = defaultdict(lambda: {'count': 0, 'volume': 0.0})
|
||||
for trade in trades:
|
||||
for user in trade['users']:
|
||||
taker_data[user]['count'] += 1
|
||||
taker_data[user]['volume'] += trade['value']
|
||||
|
||||
top_takers = sorted(taker_data.items(), key=lambda item: item[1]['count'], reverse=True)[:5]
|
||||
|
||||
for i, (address, data) in enumerate(top_takers, 1):
|
||||
lines.append(f"{i:<2} | {address:<42} | {data['count']:>12} | ${data['volume']:>18,.2f}")
|
||||
|
||||
while len(lines) < 8: lines.append(" " * len(header))
|
||||
return lines
|
||||
|
||||
|
||||
def build_decoded_trade_lines(coin: str) -> list:
|
||||
"""Builds a formatted, multi-line string for a single decoded trade."""
|
||||
trade = latest_raw_trades[coin]
|
||||
if not trade: return ["No trade data yet..."] * 7
|
||||
|
||||
return [
|
||||
f"Time: {datetime.fromtimestamp(trade['time'] / 1000, tz=timezone.utc)}",
|
||||
f"Side: {'BUY' if trade.get('side') == 'B' else 'SELL'}",
|
||||
f"Price: {trade.get('px', 'N/A')}",
|
||||
f"Size: {trade.get('sz', 'N/A')}",
|
||||
f"Trade ID: {trade.get('tid', 'N/A')}",
|
||||
f"Hash: {trade.get('hash', 'N/A')}",
|
||||
f"Users: {', '.join(trade.get('users', []))}"
|
||||
]
|
||||
|
||||
def update_decoded_trade_display():
|
||||
"""
|
||||
Updates the global variable holding the decoded trade output, but only
|
||||
at the 40-second mark of each minute.
|
||||
"""
|
||||
global decoded_trade_output
|
||||
if datetime.now().second == 40:
|
||||
lines = []
|
||||
lines.append("--- Last BTC Trade (Decoded) ---")
|
||||
lines.extend(build_decoded_trade_lines("BTC"))
|
||||
lines.append("")
|
||||
lines.append("--- Last ETH Trade (Decoded) ---")
|
||||
lines.extend(build_decoded_trade_lines("ETH"))
|
||||
decoded_trade_output = lines
|
||||
|
||||
def display_dashboard(view: str):
|
||||
"""Clears the screen and prints the selected dashboard view."""
|
||||
global _lines_printed
|
||||
if _lines_printed > 0: print(f"\x1b[{_lines_printed}A", end="")
|
||||
|
||||
now_utc = datetime.now(timezone.utc)
|
||||
output_lines = []
|
||||
separator = " | "
|
||||
|
||||
time_windows = [
|
||||
("All Time", None), ("Last 24h", timedelta(hours=24)),
|
||||
("Last 1h", timedelta(hours=1)), ("Last 5m", timedelta(minutes=5)),
|
||||
("Last 1m", timedelta(minutes=1)),
|
||||
]
|
||||
|
||||
btc_trades_copy = list(all_trades["BTC"])
|
||||
eth_trades_copy = list(all_trades["ETH"])
|
||||
|
||||
if view == "trades":
|
||||
output_lines.append("--- Top 5 Trades by Value ---")
|
||||
for title, delta in time_windows:
|
||||
btc_trades = [t for t in btc_trades_copy if not delta or t['time'] > now_utc - delta]
|
||||
eth_trades = [t for t in eth_trades_copy if not delta or t['time'] > now_utc - delta]
|
||||
btc_lines = build_top_trades_table(f"BTC - {title}", btc_trades)
|
||||
eth_lines = build_top_trades_table(f"ETH - {title}", eth_trades)
|
||||
for i in range(len(btc_lines)):
|
||||
output_lines.append(f"{btc_lines[i]:<45}{separator}{eth_lines[i] if i < len(eth_lines) else ''}")
|
||||
output_lines.append("")
|
||||
|
||||
elif view == "takers":
|
||||
output_lines.append("--- Top 5 Takers by Volume (Rolling Windows) ---")
|
||||
for title, delta in time_windows[1:]:
|
||||
btc_trades = [t for t in btc_trades_copy if t['time'] > now_utc - delta]
|
||||
eth_trades = [t for t in eth_trades_copy if t['time'] > now_utc - delta]
|
||||
btc_lines = build_top_takers_table(f"BTC - {title}", btc_trades)
|
||||
eth_lines = build_top_takers_table(f"ETH - {title}", eth_trades)
|
||||
for i in range(len(btc_lines)):
|
||||
output_lines.append(f"{btc_lines[i]:<45}{separator}{eth_lines[i] if i < len(eth_lines) else ''}")
|
||||
output_lines.append("")
|
||||
|
||||
elif view == "active_takers":
|
||||
output_lines.append("--- Top 5 Active Takers by Trade Count (Rolling Windows) ---")
|
||||
for title, delta in time_windows[1:]:
|
||||
btc_trades = [t for t in btc_trades_copy if t['time'] > now_utc - delta]
|
||||
eth_trades = [t for t in eth_trades_copy if t['time'] > now_utc - delta]
|
||||
btc_lines = build_top_active_takers_table(f"BTC - {title}", btc_trades)
|
||||
eth_lines = build_top_active_takers_table(f"ETH - {title}", eth_trades)
|
||||
header_width = 85
|
||||
for i in range(len(btc_lines)):
|
||||
output_lines.append(f"{btc_lines[i]:<{header_width}}{separator}{eth_lines[i] if i < len(eth_lines) else ''}")
|
||||
output_lines.append("")
|
||||
|
||||
if decoded_trade_output:
|
||||
output_lines.extend(decoded_trade_output)
|
||||
else:
|
||||
for _ in range(17): output_lines.append("")
|
||||
|
||||
final_output = "\n".join(output_lines) + "\n\x1b[J"
|
||||
print(final_output, end="")
|
||||
|
||||
_lines_printed = len(output_lines)
|
||||
sys.stdout.flush()
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function to set up the WebSocket and run the display loop."""
|
||||
parser = argparse.ArgumentParser(description="Live market data dashboard for Hyperliquid.")
|
||||
parser.add_argument("--view", default="trades", choices=['trades', 'takers', 'active_takers'],
|
||||
help="The data view to display: 'trades' (default), 'takers', or 'active_takers'.")
|
||||
args = parser.parse_args()
|
||||
|
||||
coins_to_watch = get_coins_from_strategies()
|
||||
if not ("BTC" in coins_to_watch and "ETH" in coins_to_watch):
|
||||
print("This script is configured to display BTC and ETH. Please ensure they are in your strategies.", file=sys.stderr)
|
||||
return
|
||||
|
||||
info = Info(constants.MAINNET_API_URL, skip_ws=False)
|
||||
|
||||
for coin in ["BTC", "ETH"]:
|
||||
trade_subscription = {"type": "trades", "coin": coin}
|
||||
info.subscribe(trade_subscription, on_message)
|
||||
print(f"Subscribed to Trades for {coin}")
|
||||
time.sleep(0.2)
|
||||
|
||||
print(f"\nDisplaying live '{args.view}' summary... Press Ctrl+C to stop.")
|
||||
try:
|
||||
while True:
|
||||
update_decoded_trade_display()
|
||||
display_dashboard(view=args.view)
|
||||
time.sleep(1)
|
||||
except KeyboardInterrupt:
|
||||
print("\nStopping WebSocket listener...")
|
||||
info.ws_manager.stop()
|
||||
print("Listener stopped.")
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
|
||||
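The dashboard repaints in place rather than clearing the whole terminal: it moves the cursor up over the previously printed block with ESC[nA and clears from the cursor to the end of the screen with ESC[J. A minimal sketch of that redraw loop:

import time

lines_printed = 0
for i in range(5):
    if lines_printed > 0:
        print(f"\x1b[{lines_printed}A", end="")  # move cursor up over the old frame
    frame = [f"tick {i}", "second line of the frame"]
    print("\n".join(frame) + "\n\x1b[J", end="")  # redraw, then clear leftovers below
    lines_printed = len(frame)
    time.sleep(0.2)

Keeping every frame the same number of lines (the padding loops in the table builders above) is what stops the columns from jumping between refreshes.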
187
live_market_utils.py
Normal file
@ -0,0 +1,187 @@
|
||||
import logging
|
||||
import json
|
||||
import time
|
||||
import os
|
||||
import traceback
|
||||
import sys
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
# --- Configuration for standalone error logging ---
|
||||
LOGS_DIR = "_logs"
|
||||
ERROR_LOG_FILE = os.path.join(LOGS_DIR, "live_market_errors.log")
|
||||
|
||||
def log_error(error_message: str, include_traceback: bool = True):
|
||||
"""A simple, robust file logger for any errors."""
|
||||
try:
|
||||
if not os.path.exists(LOGS_DIR):
|
||||
os.makedirs(LOGS_DIR)
|
||||
|
||||
with open(ERROR_LOG_FILE, 'a') as f:
|
||||
timestamp = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime())
|
||||
f.write(f"--- ERROR at {timestamp} UTC ---\n")
|
||||
f.write(error_message + "\n")
|
||||
if include_traceback:
|
||||
f.write(traceback.format_exc() + "\n")
|
||||
f.write("="*50 + "\n")
|
||||
except Exception:
|
||||
print(f"CRITICAL: Failed to write to error log file: {error_message}", file=sys.stderr)
|
||||
|
||||
|
||||
def on_message(message, shared_prices_dict):
|
||||
"""
|
||||
Callback function to process incoming WebSocket messages for 'bbo' and 'trades'
|
||||
and update the shared memory dictionary.
|
||||
"""
|
||||
try:
|
||||
logging.debug(f"Received WebSocket message: {message}")
|
||||
channel = message.get("channel")
|
||||
|
||||
# --- Parser 1: Handle Best Bid/Offer messages ---
|
||||
if channel == "bbo":
|
||||
data = message.get("data")
|
||||
if not data:
|
||||
logging.warning("BBO message received with no data.")
|
||||
return
|
||||
|
||||
coin = data.get("coin")
|
||||
if not coin:
|
||||
logging.warning("BBO data received with no coin identifier.")
|
||||
return
|
||||
|
||||
bid_ask_data = data.get("bbo")
|
||||
|
||||
if not bid_ask_data or not isinstance(bid_ask_data, list) or len(bid_ask_data) < 2:
|
||||
logging.warning(f"[{coin}] Received BBO message with invalid 'bbo' array: {bid_ask_data}")
|
||||
return
|
||||
|
||||
try:
|
||||
bid_price_str = bid_ask_data[0].get('px')
|
||||
ask_price_str = bid_ask_data[1].get('px')
|
||||
|
||||
if not bid_price_str or not ask_price_str:
|
||||
logging.warning(f"[{coin}] BBO data missing 'px' field.")
|
||||
return
|
||||
|
||||
bid_price = float(bid_price_str)
|
||||
ask_price = float(ask_price_str)
|
||||
|
||||
# Update the shared dictionary for Bid and Ask
|
||||
shared_prices_dict[f"{coin}_bid"] = bid_price
|
||||
shared_prices_dict[f"{coin}_ask"] = ask_price
|
||||
|
||||
logging.info(f"Updated {coin} (BBO): Bid={bid_price:.4f}, Ask={ask_price:.4f}")
|
||||
|
||||
except (ValueError, TypeError, IndexError) as e:
|
||||
logging.error(f"[{coin}] Error parsing BBO data: {e}. Data: {bid_ask_data}")
|
||||
|
||||
# --- Parser 2: Handle Live Trade messages ---
|
||||
elif channel == "trades":
|
||||
trade_list = message.get("data")
|
||||
|
||||
if not trade_list or not isinstance(trade_list, list) or len(trade_list) == 0:
|
||||
logging.warning(f"Received 'trades' message with invalid data: {trade_list}")
|
||||
return
|
||||
|
||||
# Process all trades in the batch
|
||||
for trade in trade_list:
|
||||
try:
|
||||
coin = trade.get("coin")
|
||||
price_str = trade.get("px")
|
||||
|
||||
if not coin or not price_str:
|
||||
logging.warning(f"Trade data missing 'coin' or 'px': {trade}")
|
||||
continue
|
||||
|
||||
price = float(price_str)
|
||||
|
||||
# Update the shared dictionary for the "Live Price" column
|
||||
shared_prices_dict[coin] = price
|
||||
|
||||
logging.info(f"Updated {coin} (Live Price) to last trade: {price:.4f}")
|
||||
|
||||
except (ValueError, TypeError) as e:
|
||||
logging.error(f"Error parsing trade data: {e}. Data: {trade}")
|
||||
|
||||
except Exception as e:
|
||||
log_error(f"Error in WebSocket on_message: {e}")
|
||||
|
||||
def start_live_feed(shared_prices_dict, coins_to_watch: list, log_level='off'):
|
||||
"""
|
||||
Main function for the WebSocket process.
|
||||
Subscribes to BOTH 'bbo' and 'trades' for all watched coins.
|
||||
"""
|
||||
setup_logging(log_level, 'LiveMarketFeed_Combined')
|
||||
|
||||
info = None
|
||||
callback = lambda msg: on_message(msg, shared_prices_dict)
|
||||
|
||||
def connect_and_subscribe():
|
||||
"""Establishes a new WebSocket connection and subscribes to both streams."""
|
||||
try:
|
||||
logging.info("Connecting to Hyperliquid WebSocket...")
|
||||
new_info = Info(constants.MAINNET_API_URL, skip_ws=False)
|
||||
|
||||
# --- MODIFIED: Subscribe to 'bbo' AND 'trades' for each coin ---
|
||||
for coin in coins_to_watch:
|
||||
# Subscribe to Best Bid/Offer
|
||||
bbo_sub = {"type": "bbo", "coin": coin}
|
||||
new_info.subscribe(bbo_sub, callback)
|
||||
logging.info(f"Subscribed to 'bbo' for {coin}.")
|
||||
|
||||
# Subscribe to Live Trades
|
||||
trades_sub = {"type": "trades", "coin": coin}
|
||||
new_info.subscribe(trades_sub, callback)
|
||||
logging.info(f"Subscribed to 'trades' for {coin}.")
|
||||
|
||||
logging.info("WebSocket connected and all subscriptions sent.")
|
||||
return new_info
|
||||
except Exception as e:
|
||||
log_error(f"Failed to connect to WebSocket: {e}")
|
||||
return None
|
||||
|
||||
info = connect_and_subscribe()
|
||||
|
||||
if info is None:
|
||||
logging.critical("Initial WebSocket connection failed. Exiting process.")
|
||||
log_error("Initial WebSocket connection failed. Exiting process.", include_traceback=False)
|
||||
time.sleep(10) # Wait before letting the process manager restart it
|
||||
return
|
||||
|
||||
logging.info("Starting Combined (BBO + Trades) live price feed process.")
|
||||
|
||||
try:
|
||||
while True:
|
||||
# --- Watchdog Logic ---
|
||||
time.sleep(15) # Check the connection every 15 seconds
|
||||
|
||||
if not info.ws_manager.is_alive():
|
||||
error_msg = "WebSocket connection lost. Attempting to reconnect..."
|
||||
logging.warning(error_msg)
|
||||
log_error(error_msg, include_traceback=False) # Log it to the file
|
||||
|
||||
try:
|
||||
info.ws_manager.stop() # Clean up old manager
|
||||
except Exception as e:
|
||||
log_error(f"Error stopping old ws_manager: {e}")
|
||||
|
||||
info = connect_and_subscribe()
|
||||
|
||||
if info is None:
|
||||
logging.error("Reconnect failed, will retry in 15s.")
|
||||
else:
|
||||
logging.info("Successfully reconnected to WebSocket.")
|
||||
else:
|
||||
logging.debug("Watchdog check: WebSocket connection is active.")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Stopping WebSocket listener...")
|
||||
except Exception as e:
|
||||
log_error(f"Live Market Feed process crashed: {e}")
|
||||
finally:
|
||||
if info and info.ws_manager:
|
||||
info.ws_manager.stop()
|
||||
logging.info("Combined Listener stopped.")
|
||||
|
||||
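start_live_feed mutates whatever mapping it is given, writing <coin>, <coin>_bid, and <coin>_ask keys; in a multi-process setup that mapping is typically a Manager dict. A hedged usage sketch (how the main process actually wires this up is not shown in this file, so the snippet below is an assumption):

import multiprocessing
import time

from live_market_utils import start_live_feed

if __name__ == "__main__":
    manager = multiprocessing.Manager()
    shared_prices = manager.dict()  # updated in place by the feed process

    feed = multiprocessing.Process(
        target=start_live_feed,
        args=(shared_prices, ["BTC", "ETH"]),
        kwargs={"log_level": "off"},
        daemon=True,
    )
    feed.start()

    for _ in range(10):
        time.sleep(2)
        print(dict(shared_prices))  # e.g. {'BTC': ..., 'BTC_bid': ..., 'BTC_ask': ...}

    feed.terminate()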
logging_utils.py
@ -1,5 +1,29 @@
|
||||
import logging
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
class LocalTimeFormatter(logging.Formatter):
|
||||
"""
|
||||
Custom formatter to display time with milliseconds and a (UTC+HH) offset.
|
||||
"""
|
||||
def formatTime(self, record, datefmt=None):
|
||||
# Convert log record's creation time to a local, timezone-aware datetime object
|
||||
dt = datetime.fromtimestamp(record.created).astimezone()
|
||||
|
||||
# Format the main time part
|
||||
time_part = dt.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
# Get the UTC offset and format it as (UTC+HH)
|
||||
offset = dt.utcoffset()
|
||||
offset_str = ""
|
||||
if offset is not None:
|
||||
offset_hours = int(offset.total_seconds() / 3600)
|
||||
sign = '+' if offset_hours >= 0 else ''
|
||||
offset_str = f" (UTC{sign}{offset_hours})"
|
||||
|
||||
# --- FIX: Cast record.msecs from float to int before formatting ---
|
||||
# Combine time, milliseconds, and the offset string
|
||||
return f"{time_part},{int(record.msecs):03d}{offset_str}"
|
||||
|
||||
def setup_logging(log_level: str, process_name: str):
|
||||
"""
|
||||
@ -29,10 +53,9 @@ def setup_logging(log_level: str, process_name: str):
|
||||
|
||||
handler = logging.StreamHandler(sys.stdout)
|
||||
|
||||
# --- FIX: Added a date format that includes the timezone name (%Z) ---
|
||||
formatter = logging.Formatter(
|
||||
f'%(asctime)s - {process_name} - %(levelname)s - %(message)s',
|
||||
datefmt='%Y-%m-%d %H:%M:%S %Z'
|
||||
# This will produce timestamps like: 2025-10-13 14:30:00,123 (UTC+2)
|
||||
formatter = LocalTimeFormatter(
|
||||
f'%(asctime)s - {process_name} - %(levelname)s - %(message)s'
|
||||
)
|
||||
handler.setFormatter(formatter)
|
||||
logger.addHandler(handler)
|
||||
|
||||
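With LocalTimeFormatter wired into setup_logging, every process gets timestamps of the form 2025-10-13 14:30:00,123 (UTC+2). A short usage sketch (the process name is illustrative):

import logging

from logging_utils import setup_logging

setup_logging("normal", "ExampleProcess")
logging.info("hello")
# Expected shape: 2025-10-13 14:30:00,123 (UTC+2) - ExampleProcess - INFO - hello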
772
main_app.py
@ -9,197 +9,677 @@ import schedule
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
from datetime import datetime, timezone
|
||||
import importlib
|
||||
# --- REMOVED: import signal ---
|
||||
# --- REMOVED: from queue import Empty ---
|
||||
|
||||
from logging_utils import setup_logging
|
||||
# --- Using the new high-performance WebSocket utility for live prices ---
|
||||
from live_market_utils import start_live_feed
|
||||
# --- Import the base class for type hinting (optional but good practice) ---
|
||||
from strategies.base_strategy import BaseStrategy
|
||||
|
||||
# --- Configuration ---
|
||||
WATCHED_COINS = ["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"]
|
||||
COIN_LISTER_SCRIPT = "list_coins.py"
|
||||
MARKET_FEEDER_SCRIPT = "market.py"
|
||||
DATA_FETCHER_SCRIPT = "data_fetcher.py"
|
||||
RESAMPLER_SCRIPT = "resampler.py" # Restored resampler script
|
||||
PRICE_DATA_FILE = os.path.join("_data", "current_prices.json")
|
||||
LIVE_CANDLE_FETCHER_SCRIPT = "live_candle_fetcher.py"
|
||||
RESAMPLER_SCRIPT = "resampler.py"
|
||||
# --- REMOVED: Market Cap Fetcher ---
|
||||
# --- REMOVED: trade_executor.py is no longer a script ---
|
||||
DASHBOARD_DATA_FETCHER_SCRIPT = "dashboard_data_fetcher.py"
|
||||
STRATEGY_CONFIG_FILE = os.path.join("_data", "strategies.json")
|
||||
DB_PATH = os.path.join("_data", "market_data.db")
|
||||
STATUS_FILE = os.path.join("_data", "fetcher_status.json")
|
||||
# --- REMOVED: Market Cap File ---
|
||||
LOGS_DIR = "_logs"
|
||||
TRADE_EXECUTOR_STATUS_FILE = os.path.join(LOGS_DIR, "trade_executor_status.json")
|
||||
|
||||
|
||||
def run_market_feeder():
|
||||
"""Target function to run the market.py script in a separate process."""
|
||||
setup_logging('off', 'MarketFeedProcess')
|
||||
logging.info("Market feeder process started.")
|
||||
def format_market_cap(mc_value):
|
||||
"""Formats a large number into a human-readable market cap string."""
|
||||
if not isinstance(mc_value, (int, float)) or mc_value == 0:
|
||||
return "N/A"
|
||||
if mc_value >= 1_000_000_000_000:
|
||||
return f"${mc_value / 1_000_000_000_000:.2f}T"
|
||||
if mc_value >= 1_000_000_000:
|
||||
return f"${mc_value / 1_000_000_000:.2f}B"
|
||||
if mc_value >= 1_000_000:
|
||||
return f"${mc_value / 1_000_000:.2f}M"
|
||||
return f"${mc_value:,.2f}"
|
||||
|
||||
|
||||
def run_live_candle_fetcher():
|
||||
"""Target function to run the live_candle_fetcher.py script in a resilient loop."""
|
||||
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
import signal
|
||||
shutdown_requested = False
|
||||
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
nonlocal shutdown_requested
|
||||
# Use print here as logging may not be set up
|
||||
print(f"[CandleFetcher] Shutdown signal ({signum}) received. Will stop after current run.")
|
||||
shutdown_requested = True
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
signal.signal(signal.SIGINT, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
log_file = os.path.join(LOGS_DIR, "live_candle_fetcher.log")
|
||||
|
||||
while not shutdown_requested: # <-- MODIFIED
|
||||
process = None
|
||||
try:
|
||||
with open(log_file, 'a') as f:
|
||||
command = [sys.executable, LIVE_CANDLE_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
|
||||
f.write(f"\n--- Starting {LIVE_CANDLE_FETCHER_SCRIPT} at {datetime.now()} ---\n")
|
||||
|
||||
# Use Popen instead of run to be non-blocking
|
||||
process = subprocess.Popen(command, stdout=f, stderr=subprocess.STDOUT)
|
||||
|
||||
# Poll the process and check for shutdown request
|
||||
while process.poll() is None and not shutdown_requested:
|
||||
time.sleep(0.5) # Poll every 500ms
|
||||
|
||||
if shutdown_requested and process.poll() is None:
|
||||
print(f"[CandleFetcher] Terminating subprocess {LIVE_CANDLE_FETCHER_SCRIPT}...")
|
||||
process.terminate() # Terminate the child script
|
||||
process.wait() # Wait for it to exit
|
||||
print(f"[CandleFetcher] Subprocess terminated.")
|
||||
|
||||
except (subprocess.CalledProcessError, Exception) as e:
|
||||
if shutdown_requested:
|
||||
break # Don't restart if we're shutting down
|
||||
with open(log_file, 'a') as f:
|
||||
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
|
||||
f.write(f"Live candle fetcher failed: {e}. Restarting...\n")
|
||||
time.sleep(5)
|
||||
|
||||
if shutdown_requested:
|
||||
break # Exit outer loop
|
||||
|
||||
print("[CandleFetcher] Live candle fetcher shutting down.")
|
||||
|
||||
|
||||
def run_resampler_job(timeframes_to_generate: list):
|
||||
"""Defines the job for the resampler, redirecting output to a log file."""
|
||||
log_file = os.path.join(LOGS_DIR, "resampler.log")
|
||||
try:
|
||||
# Pass the log level to the script
|
||||
subprocess.run([sys.executable, MARKET_FEEDER_SCRIPT, "--log-level", "off"], check=True)
|
||||
except subprocess.CalledProcessError as e:
|
||||
logging.error(f"Market feeder script failed with error: {e}")
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Market feeder process stopping.")
|
||||
|
||||
|
||||
def run_data_fetcher_job():
|
||||
"""Defines the job to be run by the scheduler for the data fetcher."""
|
||||
logging.info(f"Scheduler starting data_fetcher.py task for {', '.join(WATCHED_COINS)}...")
|
||||
try:
|
||||
command = [sys.executable, DATA_FETCHER_SCRIPT, "--coins"] + WATCHED_COINS + ["--days", "7", "--log-level", "off"]
|
||||
subprocess.run(command, check=True)
|
||||
logging.info("data_fetcher.py task finished successfully.")
|
||||
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--timeframes"] + timeframes_to_generate + ["--log-level", "normal"]
|
||||
with open(log_file, 'a') as f:
|
||||
f.write(f"\n--- Starting resampler.py job at {datetime.now()} ---\n")
|
||||
subprocess.run(command, check=True, stdout=f, stderr=subprocess.STDOUT)
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to run data_fetcher.py job: {e}")
|
||||
with open(log_file, 'a') as f:
|
||||
f.write(f"\n--- SCHEDULER ERROR at {datetime.now()} ---\n")
|
||||
f.write(f"Failed to run resampler.py job: {e}\n")
|
||||
|
||||
|
||||
def data_fetcher_scheduler():
|
||||
"""Schedules and runs the data_fetcher.py script periodically."""
|
||||
setup_logging('off', 'DataFetcherScheduler')
|
||||
run_data_fetcher_job()
|
||||
schedule.every(1).minutes.do(run_data_fetcher_job)
|
||||
logging.info("Data fetcher scheduled to run every 1 minute.")
|
||||
while True:
|
||||
schedule.run_pending()
|
||||
time.sleep(1)
|
||||
def resampler_scheduler(timeframes_to_generate: list):
|
||||
"""Schedules the resampler.py script."""
|
||||
|
||||
# --- Restored Resampler Functions ---
|
||||
def run_resampler_job():
|
||||
"""Defines the job to be run by the scheduler for the resampler."""
|
||||
logging.info(f"Scheduler starting resampler.py task for {', '.join(WATCHED_COINS)}...")
|
||||
try:
|
||||
# Uses default timeframes configured within resampler.py
|
||||
command = [sys.executable, RESAMPLER_SCRIPT, "--coins"] + WATCHED_COINS + ["--log-level", "off"]
|
||||
subprocess.run(command, check=True)
|
||||
logging.info("resampler.py task finished successfully.")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to run resampler.py job: {e}")
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
import signal
|
||||
shutdown_requested = False
|
||||
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
nonlocal shutdown_requested
|
||||
try:
|
||||
logging.info(f"Shutdown signal ({signum}) received. Exiting loop...")
|
||||
except NameError:
|
||||
print(f"[ResamplerScheduler] Shutdown signal ({signum}) received. Exiting loop...")
|
||||
shutdown_requested = True
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
signal.signal(signal.SIGINT, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
|
||||
def resampler_scheduler():
|
||||
"""Schedules and runs the resampler.py script periodically."""
|
||||
setup_logging('off', 'ResamplerScheduler')
|
||||
run_resampler_job()
|
||||
schedule.every(4).minutes.do(run_resampler_job)
|
||||
logging.info("Resampler scheduled to run every 4 minutes.")
|
||||
while True:
|
||||
run_resampler_job(timeframes_to_generate)
|
||||
# Schedule to run every minute at the :01 second mark
|
||||
schedule.every().minute.at(":01").do(run_resampler_job, timeframes_to_generate=timeframes_to_generate)
|
||||
logging.info("Resampler scheduled to run every minute at :01.")
|
||||
|
||||
while not shutdown_requested: # <-- MODIFIED
|
||||
schedule.run_pending()
|
||||
time.sleep(1)
|
||||
# --- End of Restored Functions ---
|
||||
time.sleep(0.5) # Check every 500ms to not miss the scheduled time and be responsive
|
||||
|
||||
logging.info("ResamplerScheduler shutting down.")
|
||||
|
||||
|
||||
# --- REMOVED: run_market_cap_fetcher_job function ---
|
||||
|
||||
# --- REMOVED: market_cap_fetcher_scheduler function ---
|
||||
|
||||
|
||||
def run_trade_executor(order_execution_queue: multiprocessing.Queue):
|
||||
"""
|
||||
Target function to run the TradeExecutor class in a resilient loop.
|
||||
It now consumes from the order_execution_queue.
|
||||
"""
|
||||
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
import signal
|
||||
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
# We can just raise KeyboardInterrupt, as it's handled below
|
||||
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
raise KeyboardInterrupt
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
log_file_path = os.path.join(LOGS_DIR, "trade_executor.log")
|
||||
try:
|
||||
sys.stdout = open(log_file_path, 'a', buffering=1)
|
||||
sys.stderr = sys.stdout
|
||||
except Exception as e:
|
||||
print(f"Failed to open log file for TradeExecutor: {e}")
|
||||
|
||||
setup_logging('normal', f"TradeExecutor")
|
||||
logging.info("\n--- Starting Trade Executor process ---")
|
||||
|
||||
while True:
|
||||
try:
|
||||
from trade_executor import TradeExecutor
|
||||
|
||||
executor = TradeExecutor(log_level="normal", order_execution_queue=order_execution_queue)
|
||||
|
||||
# --- REVERTED: Call executor.run() directly ---
|
||||
executor.run()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Trade Executor interrupted. Exiting.")
|
||||
return
|
||||
except Exception as e:
|
||||
logging.error(f"Trade Executor failed: {e}. Restarting...\n", exc_info=True)
|
||||
time.sleep(10)
|
||||
|
||||
def run_position_manager(trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
|
||||
"""
|
||||
Target function to run the PositionManager class in a resilient loop.
|
||||
Consumes from trade_signal_queue, produces for order_execution_queue.
|
||||
"""
|
||||
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
import signal
|
||||
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
# Raise KeyboardInterrupt, as it's handled by the loop
|
||||
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
raise KeyboardInterrupt
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
log_file_path = os.path.join(LOGS_DIR, "position_manager.log")
|
||||
try:
|
||||
sys.stdout = open(log_file_path, 'a', buffering=1)
|
||||
sys.stderr = sys.stdout
|
||||
except Exception as e:
|
||||
print(f"Failed to open log file for PositionManager: {e}")
|
||||
|
||||
setup_logging('normal', f"PositionManager")
|
||||
logging.info("\n--- Starting Position Manager process ---")
|
||||
|
||||
while True:
|
||||
try:
|
||||
from position_manager import PositionManager
|
||||
|
||||
manager = PositionManager(
|
||||
log_level="normal",
|
||||
trade_signal_queue=trade_signal_queue,
|
||||
order_execution_queue=order_execution_queue
|
||||
)
|
||||
|
||||
# --- REVERTED: Call manager.run() directly ---
|
||||
manager.run()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Position Manager interrupted. Exiting.")
|
||||
return
|
||||
except Exception as e:
|
||||
logging.error(f"Position Manager failed: {e}. Restarting...\n", exc_info=True)
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
def run_strategy(strategy_name: str, config: dict, trade_signal_queue: multiprocessing.Queue):
|
||||
"""
|
||||
This function BECOMES the strategy runner. It is executed as a separate
|
||||
process and pushes signals to the shared queue.
|
||||
"""
|
||||
# These imports only happen in the new, lightweight process
|
||||
import importlib
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import logging
|
||||
import signal # <-- ADDED
|
||||
from logging_utils import setup_logging
|
||||
from strategies.base_strategy import BaseStrategy
|
||||
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
# Raise KeyboardInterrupt, as it's handled by the loop
|
||||
try:
|
||||
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
except NameError:
|
||||
print(f"[Strategy-{strategy_name}] Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
raise KeyboardInterrupt
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
# --- Setup logging to file for this specific process ---
|
||||
log_file_path = os.path.join(LOGS_DIR, f"strategy_{strategy_name}.log")
|
||||
try:
|
||||
sys.stdout = open(log_file_path, 'a', buffering=1) # 1 = line buffering
|
||||
sys.stderr = sys.stdout
|
||||
except Exception as e:
|
||||
print(f"Failed to open log file for {strategy_name}: {e}")
|
||||
|
||||
setup_logging('normal', f"Strategy-{strategy_name}")
|
||||
|
||||
while True:
|
||||
try:
|
||||
logging.info(f"--- Starting strategy '{strategy_name}' ---")
|
||||
|
||||
if 'class' not in config:
|
||||
logging.error(f"Strategy config for '{strategy_name}' is missing the 'class' key. Exiting.")
|
||||
return
|
||||
|
||||
module_path, class_name = config['class'].rsplit('.', 1)
|
||||
module = importlib.import_module(module_path)
|
||||
StrategyClass = getattr(module, class_name)
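# For illustration only (hypothetical class path, not taken from the shipped config):
# a config entry such as "class": "strategies.sma_cross.SmaCrossStrategy" splits into
# module_path "strategies.sma_cross" and class_name "SmaCrossStrategy", which the
# importlib/getattr pair above resolves to the concrete strategy class.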
|
||||
|
||||
strategy = StrategyClass(strategy_name, config['parameters'], trade_signal_queue)
|
||||
|
||||
if config.get("is_event_driven", False):
|
||||
logging.info(f"Starting EVENT-DRIVEN logic loop...")
|
||||
strategy.run_event_loop() # This is a blocking call
|
||||
else:
|
||||
logging.info(f"Starting POLLING logic loop...")
|
||||
strategy.run_polling_loop() # This is the original blocking call
|
||||
|
||||
# --- REVERTED: Added back simple KeyboardInterrupt handler ---
|
||||
except KeyboardInterrupt:
|
||||
logging.info(f"Strategy {strategy_name} process stopping.")
|
||||
return
|
||||
except Exception as e:
|
||||
# --- REVERTED: Removed specific check for KeyboardInterrupt ---
|
||||
logging.error(f"Strategy '{strategy_name}' failed: {e}", exc_info=True)
|
||||
logging.info("Restarting strategy in 10 seconds...")
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
def run_dashboard_data_fetcher():
|
||||
"""Target function to run the dashboard_data_fetcher.py script."""
|
||||
|
||||
# --- GRACEFUL SHUTDOWN HANDLER ---
|
||||
import signal
|
||||
|
||||
def handle_shutdown_signal(signum, frame):
|
||||
# Raise KeyboardInterrupt, as it's handled by the loop
|
||||
try:
|
||||
logging.info(f"Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
except NameError:
|
||||
print(f"[DashboardDataFetcher] Shutdown signal ({signum}) received. Initiating graceful exit...")
|
||||
raise KeyboardInterrupt
|
||||
|
||||
signal.signal(signal.SIGTERM, handle_shutdown_signal)
|
||||
# --- END GRACEFUL SHUTDOWN HANDLER ---
|
||||
|
||||
log_file = os.path.join(LOGS_DIR, "dashboard_data_fetcher.log")
|
||||
while True:
|
||||
try:
|
||||
with open(log_file, 'a') as f:
|
||||
f.write(f"\n--- Starting Dashboard Data Fetcher at {datetime.now()} ---\n")
|
||||
subprocess.run([sys.executable, DASHBOARD_DATA_FETCHER_SCRIPT, "--log-level", "normal"], check=True, stdout=f, stderr=subprocess.STDOUT)
|
||||
except KeyboardInterrupt: # --- MODIFIED: Added to catch interrupt ---
|
||||
logging.info("Dashboard Data Fetcher stopping.")
|
||||
break
|
||||
except (subprocess.CalledProcessError, Exception) as e:
|
||||
with open(log_file, 'a') as f:
|
||||
f.write(f"\n--- PROCESS ERROR at {datetime.now()} ---\n")
|
||||
f.write(f"Dashboard Data Fetcher failed: {e}. Restarting...\n")
|
||||
time.sleep(10)
|
||||
|
||||
|
||||
class MainApp:
|
||||
def __init__(self, coins_to_watch: list):
|
||||
def __init__(self, coins_to_watch: list, processes: dict, strategy_configs: dict, shared_prices: dict):
|
||||
self.watched_coins = coins_to_watch
|
||||
self.shared_prices = shared_prices
|
||||
self.prices = {}
|
||||
self.last_db_update_info = "Initializing..."
|
||||
self._lines_printed = 0 # To track how many lines we printed last time
|
||||
# --- REMOVED: self.market_caps ---
|
||||
self.open_positions = {}
|
||||
self.background_processes = processes
|
||||
self.process_status = {}
|
||||
self.strategy_configs = strategy_configs
|
||||
self.strategy_statuses = {}
|
||||
|
||||
def read_prices(self):
|
||||
"""Reads the latest prices from the JSON file."""
|
||||
if not os.path.exists(PRICE_DATA_FILE):
|
||||
return
|
||||
"""Reads the latest prices directly from the shared memory dictionary."""
|
||||
try:
|
||||
with open(PRICE_DATA_FILE, 'r', encoding='utf-8') as f:
|
||||
self.prices = json.load(f)
|
||||
except (json.JSONDecodeError, IOError):
|
||||
logging.debug("Could not read price file (might be locked).")
|
||||
# --- FIX: Use .copy() for thread-safe iteration ---
|
||||
self.prices = self.shared_prices.copy()
|
||||
except Exception as e:
|
||||
logging.debug(f"Could not read from shared prices dict: {e}")
|
||||
|
||||
def get_overall_db_status(self):
    """Reads the fetcher status from the status file."""
    if not os.path.exists(STATUS_FILE):
        self.last_db_update_info = "Status file not found."
        return
    try:
        with open(STATUS_FILE, 'r', encoding='utf-8') as f:
            status = json.load(f)
        coin = status.get("last_updated_coin")
        timestamp_utc_str = status.get("last_run_timestamp_utc")
        num_candles = status.get("num_updated_candles", 0)

        if timestamp_utc_str:
            dt_naive = datetime.strptime(timestamp_utc_str, '%Y-%m-%d %H:%M:%S')
            dt_utc = dt_naive.replace(tzinfo=timezone.utc)
            dt_local = dt_utc.astimezone(None)
            timestamp_display = dt_local.strftime('%Y-%m-%d %H:%M:%S %Z')
        else:
            timestamp_display = "N/A"

        self.last_db_update_info = f"{coin} at {timestamp_display} ({num_candles} candles)"
    except (IOError, json.JSONDecodeError) as e:
        self.last_db_update_info = "Error reading status file."
        logging.error(f"Could not read status file: {e}")

# --- REMOVED: read_market_caps method ---

def read_strategy_statuses(self):
    """Reads the status JSON file for each enabled strategy."""
    enabled_statuses = {}
    for name, config in self.strategy_configs.items():
        if config.get("enabled", False):
            status_file = os.path.join("_data", f"strategy_status_{name}.json")
            if os.path.exists(status_file):
                try:
                    with open(status_file, 'r', encoding='utf-8') as f:
                        enabled_statuses[name] = json.load(f)
                except (IOError, json.JSONDecodeError):
                    enabled_statuses[name] = {"error": "Could not read status file."}
            else:
                enabled_statuses[name] = {"current_signal": "Initializing..."}
    self.strategy_statuses = enabled_statuses

def read_executor_status(self):
    """Reads the live status file from the trade executor."""
    if os.path.exists(TRADE_EXECUTOR_STATUS_FILE):
        try:
            with open(TRADE_EXECUTOR_STATUS_FILE, 'r', encoding='utf-8') as f:
                # --- FIX: Read the 'open_positions' key from the file ---
                status_data = json.load(f)
                self.open_positions = status_data.get('open_positions', {})
        except (IOError, json.JSONDecodeError):
            logging.debug("Could not read trade executor status file.")
    else:
        self.open_positions = {}

def check_process_status(self):
    """Checks if the background processes are still running."""
    for name, process in self.background_processes.items():
        self.process_status[name] = "Running" if process.is_alive() else "STOPPED"

def _format_price(self, price_val, width=10):
    """Helper function to format prices for the dashboard."""
    try:
        price_float = float(price_val)
        if price_float < 1:
            price_str = f"{price_float:>{width}.6f}"
        elif price_float < 100:
            price_str = f"{price_float:>{width}.4f}"
        else:
            price_str = f"{price_float:>{width}.2f}"
    except (ValueError, TypeError):
        price_str = f"{'Loading...':>{width}}"
    return price_str
|
||||
|
||||
def display_dashboard(self):
|
||||
"""Displays a formatted table for prices and DB status without blinking."""
|
||||
# Move the cursor up to overwrite the previous output
|
||||
if self._lines_printed > 0:
|
||||
print(f"\x1b[{self._lines_printed}A", end="")
|
||||
|
||||
# Build the output as a single string
|
||||
output_lines = []
|
||||
output_lines.append("--- Market Dashboard ---")
|
||||
table_width = 26
|
||||
output_lines.append("-" * table_width)
|
||||
output_lines.append(f"{'#':<2} | {'Coin':<6} | {'Live Price':>10} |")
|
||||
output_lines.append("-" * table_width)
|
||||
"""Displays a formatted dashboard with side-by-side tables."""
|
||||
print("\x1b[H\x1b[J", end="") # Clear screen
|
||||
|
||||
left_table_lines = ["--- Market Dashboard ---"]
|
||||
# --- MODIFIED: Adjusted width for new columns ---
|
||||
left_table_width = 65
|
||||
left_table_lines.append("-" * left_table_width)
|
||||
# --- MODIFIED: Replaced Market Cap with Gap ---
|
||||
left_table_lines.append(f"{'#':<2} | {'Coin':^6} | {'Best Bid':>10} | {'Live Price':>10} | {'Best Ask':>10} | {'Gap':>10} |")
|
||||
left_table_lines.append("-" * left_table_width)
|
||||
for i, coin in enumerate(self.watched_coins, 1):
|
||||
price = self.prices.get(coin, "Loading...")
|
||||
output_lines.append(f"{i:<2} | {coin:<6} | {price:>10} |")
|
||||
output_lines.append("-" * table_width)
|
||||
output_lines.append(f"DB Status: Last coin updated -> {self.last_db_update_info}")
|
||||
# --- MODIFIED: Fetch all three price types ---
|
||||
mid_price = self.prices.get(coin, "Loading...")
|
||||
bid_price = self.prices.get(f"{coin}_bid", "Loading...")
|
||||
ask_price = self.prices.get(f"{coin}_ask", "Loading...")
|
||||
|
||||
# --- MODIFIED: Use the new formatting helper ---
|
||||
formatted_mid = self._format_price(mid_price)
|
||||
formatted_bid = self._format_price(bid_price)
|
||||
formatted_ask = self._format_price(ask_price)
|
||||
|
||||
# --- MODIFIED: Calculate gap ---
|
||||
gap_str = f"{'Loading...':>10}"
|
||||
try:
|
||||
# Calculate the spread
|
||||
gap_val = float(ask_price) - float(bid_price)
|
||||
# Format gap with high precision, similar to price
|
||||
if gap_val < 1:
|
||||
gap_str = f"{gap_val:>{10}.6f}"
|
||||
else:
|
||||
gap_str = f"{gap_val:>{10}.4f}"
|
||||
except (ValueError, TypeError):
|
||||
pass # Keep 'Loading...'
|
||||
|
||||
# --- REMOVED: Market Cap logic ---
|
||||
|
||||
# --- MODIFIED: Print all price columns including gap ---
|
||||
left_table_lines.append(f"{i:<2} | {coin:^6} | {formatted_bid} | {formatted_mid} | {formatted_ask} | {gap_str} |")
|
||||
left_table_lines.append("-" * left_table_width)
|
||||
|
||||
right_table_lines = ["--- Strategy Status ---"]
|
||||
# --- FIX: Adjusted table width after removing parameters ---
|
||||
right_table_width = 105
|
||||
right_table_lines.append("-" * right_table_width)
|
||||
# --- FIX: Removed 'Parameters' from header ---
|
||||
right_table_lines.append(f"{'#':^2} | {'Strategy Name':<25} | {'Coin':^6} | {'Signal':^8} | {'Signal Price':>12} | {'Last Change':>17} | {'TF':^5} | {'Size':^8} |")
|
||||
right_table_lines.append("-" * right_table_width)
|
||||
for i, (name, status) in enumerate(self.strategy_statuses.items(), 1):
|
||||
signal = status.get('current_signal', 'N/A')
|
||||
price = status.get('signal_price')
|
||||
price_display = f"{price:.4f}" if isinstance(price, (int, float)) else "-"
|
||||
last_change = status.get('last_signal_change_utc')
|
||||
last_change_display = 'Never'
|
||||
if last_change:
|
||||
dt_utc = datetime.fromisoformat(last_change.replace('Z', '+00:00')).replace(tzinfo=timezone.utc)
|
||||
dt_local = dt_utc.astimezone(None)
|
||||
last_change_display = dt_local.strftime('%Y-%m-%d %H:%M')
|
||||
|
||||
config_params = self.strategy_configs.get(name, {}).get('parameters', {})
|
||||
|
||||
# --- FIX: Read coin/size from status file first, fallback to config ---
|
||||
coin = status.get('coin', config_params.get('coin', 'N/A'))
|
||||
|
||||
# --- FIX: Handle nested 'coins_to_copy' logic for size ---
|
||||
# --- MODIFIED: Read 'size' from status first, then config, then 'Multi' ---
|
||||
size = status.get('size')
|
||||
if not size:
|
||||
if 'coins_to_copy' in config_params:
|
||||
size = 'Multi'
|
||||
else:
|
||||
size = config_params.get('size', 'N/A')
|
||||
|
||||
timeframe = config_params.get('timeframe', 'N/A')
|
||||
|
||||
# --- FIX: Removed parameter string logic ---
|
||||
|
||||
# --- FIX: Removed 'params_str' from the formatted line ---
|
||||
|
||||
size_display = f"{size:>8}"
|
||||
if isinstance(size, (int, float)):
|
||||
# --- MODIFIED: More flexible size formatting ---
|
||||
if size < 0.0001:
|
||||
size_display = f"{size:>8.6f}"
|
||||
elif size < 1:
|
||||
size_display = f"{size:>8.4f}"
|
||||
else:
|
||||
size_display = f"{size:>8.2f}"
|
||||
# --- END NEW LOGIC ---
|
||||
|
||||
right_table_lines.append(f"{i:^2} | {name:<25} | {coin:^6} | {signal:^8} | {price_display:>12} | {last_change_display:>17} | {timeframe:^5} | {size_display} |")
|
||||
right_table_lines.append("-" * right_table_width)
|
||||
|
||||
# Join the lines and append an ANSI escape code to clear from the cursor to the end of the screen
|
||||
# This prevents artifacts if the new output is shorter than the old one.
|
||||
final_output = "\n".join(output_lines) + "\n\x1b[J"
|
||||
print(final_output, end="")
|
||||
|
||||
# Store the number of lines printed for the next iteration
|
||||
self._lines_printed = len(output_lines)
|
||||
output_lines = []
|
||||
max_rows = max(len(left_table_lines), len(right_table_lines))
|
||||
separator = " "
|
||||
indent = " " * 10
|
||||
for i in range(max_rows):
|
||||
left_part = left_table_lines[i] if i < len(left_table_lines) else " " * left_table_width
|
||||
right_part = indent + right_table_lines[i] if i < len(right_table_lines) else ""
|
||||
output_lines.append(f"{left_part}{separator}{right_part}")
|
||||
|
||||
output_lines.append("\n--- Open Positions ---")
|
||||
pos_table_width = 100
|
||||
output_lines.append("-" * pos_table_width)
|
||||
output_lines.append(f"{'Account':<10} | {'Coin':<6} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |")
|
||||
output_lines.append("-" * pos_table_width)
|
||||
|
||||
# --- FIX: Correctly read and display open positions ---
|
||||
if not self.open_positions:
|
||||
output_lines.append(f"{'No open positions.':^{pos_table_width}}")
|
||||
else:
|
||||
for account, positions in self.open_positions.items():
|
||||
if not positions:
|
||||
continue
|
||||
for coin, pos in positions.items():
|
||||
try:
|
||||
size_f = float(pos.get('size', 0))
|
||||
entry_f = float(pos.get('entry_price', 0))
|
||||
mark_f = float(self.prices.get(coin, 0))
|
||||
pnl_f = (mark_f - entry_f) * size_f if size_f > 0 else (entry_f - mark_f) * abs(size_f)
|
||||
lev = pos.get('leverage', 1)
|
||||
|
||||
size_str = f"{size_f:>{15}.5f}"
|
||||
entry_str = f"{entry_f:>{12}.2f}"
|
||||
mark_str = f"{mark_f:>{12}.2f}"
|
||||
pnl_str = f"{pnl_f:>{15}.2f}"
|
||||
lev_str = f"{lev}x"
|
||||
|
||||
output_lines.append(f"{account:<10} | {coin:<6} | {size_str} | {entry_str} | {mark_str} | {pnl_str} | {lev_str:>10} |")
|
||||
except (ValueError, TypeError):
|
||||
output_lines.append(f"{account:<10} | {coin:<6} | {'Error parsing data...':^{pos_table_width-20}} |")
|
||||
|
||||
output_lines.append("-" * pos_table_width)
|
||||
|
||||
final_output = "\n".join(output_lines)
|
||||
print(final_output)
|
||||
sys.stdout.flush()
|
||||
|
||||
def run(self):
|
||||
"""Main loop to read and display data."""
|
||||
"""Main loop to read data, display dashboard, and check processes."""
|
||||
while True:
|
||||
self.read_prices()
|
||||
self.get_overall_db_status()
|
||||
# --- REMOVED: self.read_market_caps() ---
|
||||
self.read_strategy_statuses()
|
||||
self.read_executor_status()
|
||||
# --- REMOVED: self.check_process_status() ---
|
||||
self.display_dashboard()
|
||||
time.sleep(2)
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
if __name__ == "__main__":
|
||||
setup_logging('normal', 'MainApp')
|
||||
|
||||
logging.info(f"Running coin lister: '{COIN_LISTER_SCRIPT}'...")
|
||||
if not os.path.exists(LOGS_DIR):
|
||||
os.makedirs(LOGS_DIR)
|
||||
|
||||
processes = {}
|
||||
# --- REVERTED: Removed process groups ---
|
||||
|
||||
try:
    subprocess.run([sys.executable, COIN_LISTER_SCRIPT], check=True, capture_output=True, text=True)
except subprocess.CalledProcessError as e:
    logging.error(f"Failed to run '{COIN_LISTER_SCRIPT}'. Error: {e.stderr}")

try:
    with open(STRATEGY_CONFIG_FILE, 'r') as f:
        strategy_configs = json.load(f)
except (FileNotFoundError, json.JSONDecodeError) as e:
    logging.error(f"Could not load strategies from '{STRATEGY_CONFIG_FILE}': {e}")
    sys.exit(1)
|
||||
|
||||
# --- FIX: Hardcoded timeframes ---
|
||||
required_timeframes = [
|
||||
"3m", "5m", "15m", "30m", "1h", "2h", "4h", "8h",
|
||||
"12h", "1d", "3d", "1w", "1M", "148m", "37m"
|
||||
]
|
||||
logging.info(f"Using fixed timeframes for resampler: {required_timeframes}")
|
||||
|
||||
with multiprocessing.Manager() as manager:
|
||||
shared_prices = manager.dict()
|
||||
# --- FIX: Create TWO queues ---
|
||||
trade_signal_queue = manager.Queue()
|
||||
order_execution_queue = manager.Queue()
|
||||
|
||||
logging.info(f"Starting market feeder ('{MARKET_FEEDER_SCRIPT}')...")
|
||||
market_process = multiprocessing.Process(target=run_market_feeder, daemon=True)
|
||||
market_process.start()
|
||||
|
||||
logging.info(f"Starting historical data fetcher ('{DATA_FETCHER_SCRIPT}')...")
|
||||
fetcher_process = multiprocessing.Process(target=data_fetcher_scheduler, daemon=True)
|
||||
fetcher_process.start()
|
||||
|
||||
# --- Restored Resampler Process Start ---
|
||||
logging.info(f"Starting resampler ('{RESAMPLER_SCRIPT}')...")
|
||||
resampler_process = multiprocessing.Process(target=resampler_scheduler, daemon=True)
|
||||
resampler_process.start()
|
||||
# --- End Resampler Process Start ---
|
||||
|
||||
time.sleep(3)
|
||||
# --- REVERTED: All processes are daemon=True and in one dict ---
|
||||
|
||||
# --- FIX: Pass WATCHED_COINS to the start_live_feed process ---
|
||||
# --- MODIFICATION: Set log level back to 'off' ---
|
||||
processes["Live Market Feed"] = multiprocessing.Process(
|
||||
target=start_live_feed,
|
||||
args=(shared_prices, WATCHED_COINS, 'off'),
|
||||
daemon=True
|
||||
)
|
||||
processes["Live Candle Fetcher"] = multiprocessing.Process(target=run_live_candle_fetcher, daemon=True)
|
||||
processes["Resampler"] = multiprocessing.Process(target=resampler_scheduler, args=(list(required_timeframes),), daemon=True)
|
||||
# --- REMOVED: Market Cap Fetcher Process ---
|
||||
processes["Dashboard Data"] = multiprocessing.Process(target=run_dashboard_data_fetcher, daemon=True)
|
||||
|
||||
app = MainApp(coins_to_watch=WATCHED_COINS)
|
||||
try:
|
||||
app.run()
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Shutting down...")
|
||||
market_process.terminate()
|
||||
fetcher_process.terminate()
|
||||
# --- Restored Resampler Shutdown ---
|
||||
resampler_process.terminate()
|
||||
market_process.join()
|
||||
fetcher_process.join()
|
||||
resampler_process.join()
|
||||
# --- End Resampler Shutdown ---
|
||||
processes["Position Manager"] = multiprocessing.Process(
|
||||
target=run_position_manager,
|
||||
args=(trade_signal_queue, order_execution_queue),
|
||||
daemon=True
|
||||
)
|
||||
processes["Trade Executor"] = multiprocessing.Process(
|
||||
target=run_trade_executor,
|
||||
args=(order_execution_queue,),
|
||||
daemon=True
|
||||
)
|
||||
|
||||
for name, config in strategy_configs.items():
|
||||
if config.get("enabled", False):
|
||||
if 'class' not in config:
|
||||
logging.error(f"Strategy '{name}' is missing 'class' key. Skipping.")
|
||||
continue
|
||||
proc = multiprocessing.Process(target=run_strategy, args=(name, config, trade_signal_queue), daemon=True)
|
||||
processes[f"Strategy: {name}"] = proc # Add to strategy group
|
||||
|
||||
# --- REVERTED: Removed combined dict ---
|
||||
|
||||
for name, proc in processes.items():
|
||||
logging.info(f"Starting process '{name}'...")
|
||||
proc.start()
|
||||
|
||||
time.sleep(3)
|
||||
|
||||
app = MainApp(coins_to_watch=WATCHED_COINS, processes=processes, strategy_configs=strategy_configs, shared_prices=shared_prices)
|
||||
try:
|
||||
app.run()
|
||||
except KeyboardInterrupt:
|
||||
# --- MODIFIED: Staged shutdown ---
|
||||
logging.info("Shutting down...")
|
||||
|
||||
strategy_procs = {}
|
||||
other_procs = {}
|
||||
for name, proc in processes.items():
|
||||
if name.startswith("Strategy:"):
|
||||
strategy_procs[name] = proc
|
||||
else:
|
||||
other_procs[name] = proc
|
||||
|
||||
# --- 1. Terminate strategy processes ---
|
||||
logging.info("Shutting down strategy processes first...")
|
||||
for name, proc in strategy_procs.items():
|
||||
if proc.is_alive():
|
||||
logging.info(f"Terminating process: '{name}'...")
|
||||
proc.terminate()
|
||||
|
||||
# --- 2. Wait for 5 seconds ---
|
||||
logging.info("Waiting 5 seconds for strategies to close...")
|
||||
time.sleep(5)
|
||||
|
||||
# --- 3. Terminate all other processes ---
|
||||
logging.info("Shutting down remaining core processes...")
|
||||
for name, proc in other_procs.items():
|
||||
if proc.is_alive():
|
||||
logging.info(f"Terminating process: '{name}'...")
|
||||
proc.terminate()
|
||||
|
||||
# --- 4. Join all processes (strategies and others) ---
|
||||
logging.info("Waiting for all processes to join...")
|
||||
for name, proc in processes.items(): # Iterate over the original dict to get all
|
||||
if proc.is_alive():
|
||||
logging.info(f"Waiting for process '{name}' to join...")
|
||||
proc.join(timeout=5) # Wait up to 5 seconds
|
||||
if proc.is_alive():
|
||||
# If it's still alive, force kill
|
||||
logging.warning(f"Process '{name}' did not terminate, forcing kill.")
|
||||
proc.kill()
|
||||
# --- END MODIFIED ---
|
||||
|
||||
logging.info("Shutdown complete.")
|
||||
sys.exit(0)
|
||||
|
||||
|
||||
|
||||
|
||||
321
market_cap_fetcher.py
Normal file
@ -0,0 +1,321 @@
|
||||
import argparse
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
import requests
|
||||
import time
|
||||
from datetime import datetime, timezone, timedelta
|
||||
import json
|
||||
from dotenv import load_dotenv
|
||||
|
||||
load_dotenv()
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class MarketCapFetcher:
|
||||
"""
|
||||
Fetches historical daily market cap data from the CoinGecko API and
|
||||
intelligently upserts it into the SQLite database for all coins.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str):
|
||||
setup_logging(log_level, 'MarketCapFetcher')
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.api_base_url = "https://api.coingecko.com/api/v3"
|
||||
self.api_key = os.environ.get("COINGECKO_API_KEY")
|
||||
if not self.api_key:
|
||||
logging.error("CoinGecko API key not found. Please set the COINGECKO_API_KEY environment variable.")
|
||||
sys.exit(1)
|
||||
|
||||
self.COIN_ID_MAP = self._load_coin_id_map()
|
||||
if not self.COIN_ID_MAP:
|
||||
logging.error("Coin ID map is empty. Run 'update_coin_map.py' to generate it.")
|
||||
sys.exit(1)
|
||||
|
||||
self.coins_to_fetch = list(self.COIN_ID_MAP.keys())
|
||||
|
||||
self.STABLECOIN_ID_MAP = {
|
||||
"USDT": "tether", "USDC": "usd-coin", "USDE": "ethena-usde",
|
||||
"DAI": "dai", "PYUSD": "paypal-usd"
|
||||
}
|
||||
|
||||
self._ensure_tables_exist()
|
||||
|
||||
def _ensure_tables_exist(self):
|
||||
"""Ensures all market cap tables exist with timestamp_ms as PRIMARY KEY."""
|
||||
all_tables_to_check = [f"{coin}_market_cap" for coin in self.coins_to_fetch]
|
||||
all_tables_to_check.extend(["STABLECOINS_market_cap", "TOTAL_market_cap_daily"])
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
for table_name in all_tables_to_check:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"PRAGMA table_info('{table_name}')")
|
||||
columns = cursor.fetchall()
|
||||
if columns:
|
||||
pk_found = any(col[1] == 'timestamp_ms' and col[5] == 1 for col in columns)
|
||||
if not pk_found:
|
||||
logging.warning(f"Schema for table '{table_name}' is incorrect. Dropping and recreating table.")
|
||||
try:
|
||||
conn.execute(f'DROP TABLE "{table_name}"')
|
||||
self._create_market_cap_table(conn, table_name)
|
||||
logging.info(f"Successfully recreated schema for '{table_name}'.")
|
||||
except Exception as e:
|
||||
logging.error(f"FATAL: Failed to recreate table '{table_name}': {e}. Please delete 'market_data.db' and restart.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
self._create_market_cap_table(conn, table_name)
|
||||
logging.info("All market cap table schemas verified.")
|
||||
|
||||
def _create_market_cap_table(self, conn, table_name):
|
||||
"""Creates a new market cap table with the correct schema."""
|
||||
conn.execute(f'''
|
||||
CREATE TABLE IF NOT EXISTS "{table_name}" (
|
||||
datetime_utc TEXT,
|
||||
timestamp_ms INTEGER PRIMARY KEY,
|
||||
market_cap REAL
|
||||
)
|
||||
''')
|
||||
|
||||
def _load_coin_id_map(self) -> dict:
|
||||
"""Loads the dynamically generated coin-to-id mapping."""
|
||||
map_file_path = os.path.join("_data", "coin_id_map.json")
|
||||
try:
|
||||
with open(map_file_path, 'r') as f:
|
||||
return json.load(f)
|
||||
except (FileNotFoundError, json.JSONDecodeError) as e:
|
||||
logging.error(f"Could not load '{map_file_path}'. Please run 'update_coin_map.py' first. Error: {e}")
|
||||
return {}
|
||||
|
||||
def _upsert_market_cap_data(self, conn, table_name: str, df: pd.DataFrame):
|
||||
"""Upserts a DataFrame of market cap data into the specified table."""
|
||||
if df.empty:
|
||||
return
|
||||
|
||||
records_to_upsert = []
|
||||
for index, row in df.iterrows():
|
||||
records_to_upsert.append((
|
||||
row['datetime_utc'].strftime('%Y-%m-%d %H:%M:%S'),
|
||||
row['timestamp_ms'],
|
||||
row['market_cap']
|
||||
))
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.executemany(f'''
|
||||
INSERT OR REPLACE INTO "{table_name}" (datetime_utc, timestamp_ms, market_cap)
|
||||
VALUES (?, ?, ?)
|
||||
''', records_to_upsert)
|
||||
conn.commit()
|
||||
logging.info(f"Successfully upserted {len(records_to_upsert)} records into '{table_name}'.")
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Main execution function to process all configured coins and update the database.
|
||||
"""
|
||||
logging.info("Starting historical market cap fetch process from CoinGecko...")
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute("PRAGMA journal_mode=WAL;")
|
||||
|
||||
for coin_symbol in self.coins_to_fetch:
|
||||
coin_id = self.COIN_ID_MAP.get(coin_symbol.upper())
|
||||
if not coin_id:
|
||||
logging.warning(f"No CoinGecko ID found for '{coin_symbol}'. Skipping.")
|
||||
continue
|
||||
logging.info(f"--- Processing {coin_symbol} ({coin_id}) ---")
|
||||
try:
|
||||
self._update_market_cap_for_coin(coin_id, coin_symbol, conn)
|
||||
except Exception as e:
|
||||
logging.error(f"An unexpected error occurred while processing {coin_symbol}: {e}")
|
||||
time.sleep(2)
|
||||
|
||||
self._update_stablecoin_aggregate(conn)
|
||||
self._update_total_market_cap(conn)
|
||||
self._save_summary(conn)
|
||||
|
||||
logging.info("--- Market cap fetch process complete ---")
|
||||
|
||||
def _save_summary(self, conn):
|
||||
# ... (This function is unchanged)
|
||||
logging.info("--- Generating Market Cap Summary ---")
|
||||
summary_data = {}
|
||||
summary_file_path = os.path.join("_data", "market_cap_data.json")
|
||||
try:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute("SELECT name FROM sqlite_master WHERE type='table' AND (name LIKE '%_market_cap' OR name LIKE 'TOTAL_%');")
|
||||
tables = [row[0] for row in cursor.fetchall()]
|
||||
for table_name in tables:
|
||||
try:
|
||||
df_last = pd.read_sql(f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT 1', conn)
|
||||
if not df_last.empty:
|
||||
summary_data[table_name] = df_last.to_dict('records')[0]
|
||||
except Exception as e:
|
||||
logging.error(f"Could not read last record from table '{table_name}': {e}")
|
||||
if summary_data:
|
||||
summary_data['summary_last_updated_utc'] = datetime.now(timezone.utc).isoformat()
|
||||
with open(summary_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(summary_data, f, indent=4)
|
||||
logging.info(f"Successfully saved market cap summary to '{summary_file_path}'")
|
||||
else:
|
||||
logging.warning("No data found to create a summary.")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to generate summary: {e}")
|
||||
|
||||
def _update_total_market_cap(self, conn):
|
||||
"""Fetches the current total market cap and upserts it for the current date."""
|
||||
logging.info("--- Processing Total Market Cap ---")
|
||||
table_name = "TOTAL_market_cap_daily"
|
||||
try:
|
||||
today_date = datetime.now(timezone.utc).date()
|
||||
today_dt = pd.to_datetime(today_date)
|
||||
today_ts = int(today_dt.timestamp() * 1000)
|
||||
|
||||
logging.info("Fetching current global market data...")
|
||||
url = f"{self.api_base_url}/global"
|
||||
headers = {"x-cg-demo-api-key": self.api_key}
|
||||
response = requests.get(url, headers=headers)
|
||||
response.raise_for_status()
|
||||
global_data = response.json().get('data', {})
|
||||
total_mc = global_data.get('total_market_cap', {}).get('usd')
|
||||
|
||||
if total_mc:
|
||||
df_total = pd.DataFrame([{
|
||||
'datetime_utc': today_dt,
|
||||
'timestamp_ms': today_ts,
|
||||
'market_cap': total_mc
|
||||
}])
|
||||
self._upsert_market_cap_data(conn, table_name, df_total)
|
||||
logging.info(f"Saved total market cap for {today_date}: ${total_mc:,.2f}")
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
logging.error(f"Failed to fetch global market data: {e}")
|
||||
except Exception as e:
|
||||
logging.error(f"An error occurred while updating total market cap: {e}")
|
||||
|
||||
def _update_stablecoin_aggregate(self, conn):
|
||||
"""Fetches data for all stablecoins and saves the aggregated market cap."""
|
||||
logging.info("--- Processing aggregated stablecoin market cap ---")
|
||||
all_stablecoin_df = pd.DataFrame()
|
||||
|
||||
for symbol, coin_id in self.STABLECOIN_ID_MAP.items():
|
||||
logging.info(f"Fetching historical data for stablecoin: {symbol}...")
|
||||
df = self._fetch_historical_data(coin_id, days=365)
|
||||
if not df.empty:
|
||||
all_stablecoin_df = pd.concat([all_stablecoin_df, df])
|
||||
time.sleep(2)
|
||||
|
||||
if all_stablecoin_df.empty:
|
||||
logging.warning("No data fetched for any stablecoins. Cannot create aggregate.")
|
||||
return
|
||||
|
||||
aggregated_df = all_stablecoin_df.groupby('timestamp_ms').agg(
|
||||
datetime_utc=('datetime_utc', 'first'),
|
||||
market_cap=('market_cap', 'sum')
|
||||
).reset_index()
|
||||
|
||||
table_name = "STABLECOINS_market_cap"
|
||||
last_date_in_db = self._get_last_date_from_db(table_name, conn, is_timestamp_ms=True)
|
||||
|
||||
if last_date_in_db:
|
||||
aggregated_df = aggregated_df[aggregated_df['timestamp_ms'] > last_date_in_db]
|
||||
|
||||
if not aggregated_df.empty:
|
||||
self._upsert_market_cap_data(conn, table_name, aggregated_df)
|
||||
else:
|
||||
logging.info("Aggregated stablecoin data is already up-to-date.")
|
||||
|
||||
def _update_market_cap_for_coin(self, coin_id: str, coin_symbol: str, conn):
|
||||
"""Fetches and appends new market cap data for a single coin."""
|
||||
table_name = f"{coin_symbol}_market_cap"
|
||||
last_date_in_db = self._get_last_date_from_db(table_name, conn, is_timestamp_ms=True)
|
||||
|
||||
days_to_fetch = 365
|
||||
if last_date_in_db:
|
||||
delta_days = (datetime.now(timezone.utc) - datetime.fromtimestamp(last_date_in_db/1000, tz=timezone.utc)).days
|
||||
if delta_days <= 0:
|
||||
logging.info(f"Market cap data for '{coin_symbol}' is already up-to-date.")
|
||||
return
|
||||
days_to_fetch = min(delta_days + 1, 365)
|
||||
else:
|
||||
logging.info(f"No existing data found. Fetching initial {days_to_fetch} days for {coin_symbol}.")
|
||||
|
||||
df = self._fetch_historical_data(coin_id, days=days_to_fetch)
|
||||
|
||||
if df.empty:
|
||||
logging.warning(f"No market cap data returned from API for {coin_symbol}.")
|
||||
return
|
||||
|
||||
if last_date_in_db:
|
||||
df = df[df['timestamp_ms'] > last_date_in_db]
|
||||
|
||||
if not df.empty:
|
||||
self._upsert_market_cap_data(conn, table_name, df)
|
||||
else:
|
||||
logging.info(f"Data was fetched, but no new records needed saving for '{coin_symbol}'.")
|
||||
|
||||
def _get_last_date_from_db(self, table_name: str, conn, is_timestamp_ms: bool = False):
|
||||
"""Gets the most recent date or timestamp from a market cap table."""
|
||||
try:
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"SELECT name FROM sqlite_master WHERE type='table' AND name='{table_name}';")
|
||||
if not cursor.fetchone():
|
||||
return None
|
||||
|
||||
col_to_query = "timestamp_ms" if is_timestamp_ms else "datetime_utc"
|
||||
last_val = pd.read_sql(f'SELECT MAX({col_to_query}) FROM "{table_name}"', conn).iloc[0, 0]
|
||||
|
||||
if pd.isna(last_val):
|
||||
return None
|
||||
if is_timestamp_ms:
|
||||
return int(last_val)
|
||||
return pd.to_datetime(last_val)
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Could not read last date from table '{table_name}': {e}")
|
||||
return None
|
||||
|
||||
def _fetch_historical_data(self, coin_id: str, days: int) -> pd.DataFrame:
|
||||
"""Fetches historical market chart data from CoinGecko for a specified number of days."""
|
||||
url = f"{self.api_base_url}/coins/{coin_id}/market_chart"
|
||||
params = { "vs_currency": "usd", "days": days, "interval": "daily" }
|
||||
headers = {"x-cg-demo-api-key": self.api_key}
|
||||
|
||||
try:
|
||||
logging.debug(f"Fetching last {days} days for {coin_id}...")
|
||||
response = requests.get(url, headers=headers, params=params)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
market_caps = data.get('market_caps', [])
|
||||
if not market_caps: return pd.DataFrame()
|
||||
|
||||
df = pd.DataFrame(market_caps, columns=['timestamp_ms', 'market_cap'])
|
||||
|
||||
# --- FIX: Normalize all timestamps to the start of the day (00:00:00 UTC) ---
|
||||
# This prevents duplicate entries for the same day (e.g., a "live" candle vs. the daily one)
|
||||
df['datetime_utc'] = pd.to_datetime(df['timestamp_ms'], unit='ms').dt.normalize()
|
||||
|
||||
# Recalculate the timestamp_ms to match the normalized 00:00:00 datetime
|
||||
df['timestamp_ms'] = (df['datetime_utc'].astype('int64') // 10**6)
|
||||
|
||||
df.drop_duplicates(subset=['timestamp_ms'], keep='last', inplace=True)
|
||||
return df[['datetime_utc', 'timestamp_ms', 'market_cap']]
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
logging.error(f"API request failed for {coin_id}: {e}.")
|
||||
return pd.DataFrame()
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Fetch historical market cap data from CoinGecko.")
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
fetcher = MarketCapFetcher(log_level=args.log_level)
|
||||
fetcher.run()
|
||||
|
||||
2
position_logic/__init__.py
Normal file
@ -0,0 +1,2 @@
|
||||
# This file can be empty.
|
||||
# It tells Python that 'position_logic' is a directory containing modules.
|
||||
31
position_logic/base_logic.py
Normal file
@ -0,0 +1,31 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import logging
|
||||
|
||||
class BasePositionLogic(ABC):
|
||||
"""
|
||||
Abstract base class for all strategy-specific position logic.
|
||||
Defines the interface for how the PositionManager interacts with logic modules.
|
||||
"""
|
||||
def __init__(self, strategy_name: str, send_order_callback, log_trade_callback):
|
||||
self.strategy_name = strategy_name
|
||||
self.send_order = send_order_callback
|
||||
self.log_trade = log_trade_callback
|
||||
logging.info(f"Initialized position logic for '{strategy_name}'")
|
||||
|
||||
@abstractmethod
|
||||
def handle_signal(self, signal_data: dict, current_strategy_positions: dict) -> dict:
|
||||
"""
|
||||
The core logic method. This is called by the PositionManager when a
|
||||
new signal arrives for this strategy.
|
||||
|
||||
Args:
|
||||
signal_data: The full signal dictionary from the strategy.
|
||||
current_strategy_positions: A dict of this strategy's current positions,
|
||||
keyed by coin (e.g., {"BTC": {"side": "long", ...}}).
|
||||
|
||||
Returns:
|
||||
A dictionary representing the new state for the *specific coin* in the
|
||||
signal (e.g., {"side": "long", "size": 0.1}).
|
||||
Return None to indicate the position for this coin should be closed/removed.
|
||||
"""
|
||||
pass
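# Illustrative sketch only (not part of this commit): a minimal concrete logic module,
# assuming the send_order callback keeps the signature used elsewhere in this diff
# (agent, action, coin, is_buy=..., size=..., reduce_only=...).
#
#   class FlatOnAnySignalLogic(BasePositionLogic):
#       def handle_signal(self, signal_data, current_strategy_positions):
#           coin = signal_data['coin']
#           position = current_strategy_positions.get(coin)
#           if position:
#               # Closing a short means buying; closing a long means selling.
#               self.send_order("default", "market_open", coin,
#                               is_buy=(position['side'] == 'short'),
#                               size=position['size'], reduce_only=True)
#           return None  # None tells the PositionManager to drop this coin's position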
|
||||
83
position_logic/default_flip_logic.py
Normal file
@ -0,0 +1,83 @@
|
||||
import logging
|
||||
from position_logic.base_logic import BasePositionLogic
|
||||
|
||||
class DefaultFlipLogic(BasePositionLogic):
|
||||
"""
|
||||
The standard "flip-on-signal" logic used by most simple strategies
|
||||
(SMA, MA Cross, and even the per-coin Copy Trader signals).
|
||||
|
||||
- BUY signal: Closes any short, opens a long.
|
||||
- SELL signal: Closes any long, opens a short.
|
||||
- FLAT signal: Closes any open position.
|
||||
"""
|
||||
def handle_signal(self, signal_data: dict, current_strategy_positions: dict) -> dict:
|
||||
"""
|
||||
Processes a BUY, SELL, or FLAT signal and issues the necessary orders
|
||||
to flip or open a position.
|
||||
"""
|
||||
name = self.strategy_name
|
||||
params = signal_data['config']['parameters']
|
||||
coin = signal_data['coin']
|
||||
desired_signal = signal_data['signal']
|
||||
signal_price = signal_data.get('signal_price', 0)
|
||||
|
||||
size = params.get('size')
|
||||
leverage_long = int(params.get('leverage_long', 2))
|
||||
leverage_short = int(params.get('leverage_short', 2))
|
||||
agent_name = signal_data['config'].get("agent", "default").lower()
|
||||
|
||||
# --- This logic now correctly targets a specific coin ---
|
||||
current_position = current_strategy_positions.get(coin)
|
||||
new_position_state = None # Return None to close position
|
||||
|
||||
if desired_signal == "BUY" or desired_signal == "INIT_BUY":
|
||||
new_position_state = {"coin": coin, "side": "long", "size": size}
|
||||
|
||||
if not current_position:
|
||||
logging.warning(f"[{name}]-[{coin}] ACTION: Setting leverage to {leverage_long}x and opening LONG.")
|
||||
self.send_order(agent_name, "update_leverage", coin, is_buy=True, size=leverage_long)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=True, size=size)
|
||||
self.log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
elif current_position['side'] == 'short':
|
||||
logging.warning(f"[{name}]-[{coin}] ACTION: Closing SHORT and opening LONG with {leverage_long}x leverage.")
|
||||
self.send_order(agent_name, "update_leverage", coin, is_buy=True, size=leverage_long)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=True, size=current_position['size'], reduce_only=True)
|
||||
self.log_trade(strategy=name, coin=coin, action="CLOSE_SHORT", price=signal_price, size=current_position['size'], signal=desired_signal)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=True, size=size)
|
||||
self.log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
else: # Already long, do nothing
|
||||
logging.info(f"[{name}]-[{coin}] INFO: Already LONG, no action taken.")
|
||||
new_position_state = current_position # State is unchanged
|
||||
|
||||
elif desired_signal == "SELL" or desired_signal == "INIT_SELL":
|
||||
new_position_state = {"coin": coin, "side": "short", "size": size}
|
||||
|
||||
if not current_position:
|
||||
logging.warning(f"[{name}]-[{coin}] ACTION: Setting leverage to {leverage_short}x and opening SHORT.")
|
||||
self.send_order(agent_name, "update_leverage", coin, is_buy=False, size=leverage_short)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=False, size=size)
|
||||
self.log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
elif current_position['side'] == 'long':
|
||||
logging.warning(f"[{name}]-[{coin}] ACTION: Closing LONG and opening SHORT with {leverage_short}x leverage.")
|
||||
self.send_order(agent_name, "update_leverage", coin, is_buy=False, size=leverage_short)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=False, size=current_position['size'], reduce_only=True)
|
||||
self.log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=signal_price, size=current_position['size'], signal=desired_signal)
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=False, size=size)
|
||||
self.log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
else: # Already short, do nothing
|
||||
logging.info(f"[{name}]-[{coin}] INFO: Already SHORT, no action taken.")
|
||||
new_position_state = current_position # State is unchanged
|
||||
|
||||
elif desired_signal == "FLAT":
|
||||
if current_position:
|
||||
logging.warning(f"[{name}]-[{coin}] ACTION: Close {current_position['side']} position.")
|
||||
is_buy = current_position['side'] == 'short' # To close a short, we buy
|
||||
self.send_order(agent_name, "market_open", coin, is_buy=is_buy, size=current_position['size'], reduce_only=True)
|
||||
self.log_trade(strategy=name, coin=coin, action=f"CLOSE_{current_position['side'].upper()}", price=signal_price, size=current_position['size'], signal=desired_signal)
|
||||
# new_position_state is already None, which will remove it
|
||||
|
||||
return new_position_state
|
||||
170
position_manager.py
Normal file
@ -0,0 +1,170 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import multiprocessing
|
||||
import numpy as np # Import numpy to handle np.float64
|
||||
|
||||
from logging_utils import setup_logging
|
||||
from trade_log import log_trade
|
||||
|
||||
class PositionManager:
|
||||
"""
|
||||
(Stateless) Listens for EXPLICIT signals (e.g., "OPEN_LONG") from all
|
||||
strategies and converts them into specific execution orders
|
||||
(e.g., "market_open") for the TradeExecutor.
|
||||
|
||||
It holds NO position state.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str, trade_signal_queue: multiprocessing.Queue, order_execution_queue: multiprocessing.Queue):
|
||||
# Note: Logging is set up by the run_position_manager function
|
||||
|
||||
self.trade_signal_queue = trade_signal_queue
|
||||
self.order_execution_queue = order_execution_queue
|
||||
|
||||
# --- REMOVED: All state management ---
|
||||
|
||||
logging.info("Position Manager (Stateless) started.")
|
||||
|
||||
# --- REMOVED: _load_managed_positions method ---
|
||||
# --- REMOVED: _save_managed_positions method ---
|
||||
# --- REMOVED: All tick/rounding/meta logic ---
|
||||
|
||||
def send_order(self, agent: str, action: str, coin: str, is_buy: bool, size: float, reduce_only: bool = False, limit_px=None, sl_px=None, tp_px=None):
|
||||
"""Helper function to put a standardized order onto the execution queue."""
|
||||
order_data = {
|
||||
"agent": agent,
|
||||
"action": action,
|
||||
"coin": coin,
|
||||
"is_buy": is_buy,
|
||||
"size": size,
|
||||
"reduce_only": reduce_only,
|
||||
"limit_px": limit_px,
|
||||
"sl_px": sl_px,
|
||||
"tp_px": tp_px,
|
||||
}
|
||||
logging.info(f"Sending order to executor: {order_data}")
|
||||
self.order_execution_queue.put(order_data)
|
||||
|
||||
def run(self):
|
||||
"""
|
||||
Main execution loop. Blocks and waits for a signal from the queue.
|
||||
Converts explicit strategy signals into execution orders.
|
||||
"""
|
||||
logging.info("Position Manager started. Waiting for signals...")
|
||||
while True:
|
||||
try:
|
||||
trade_signal = self.trade_signal_queue.get()
|
||||
if not trade_signal:
|
||||
continue
|
||||
|
||||
logging.info(f"Received signal: {trade_signal}")
|
||||
|
||||
name = trade_signal['strategy_name']
|
||||
config = trade_signal['config']
|
||||
params = config['parameters']
|
||||
coin = trade_signal['coin'].upper()
|
||||
|
||||
# --- NEW: The signal is now the explicit action ---
|
||||
desired_signal = trade_signal['signal']
|
||||
|
||||
status = trade_signal
|
||||
|
||||
signal_price = status.get('signal_price')
|
||||
if isinstance(signal_price, np.float64):
|
||||
signal_price = float(signal_price)
|
||||
|
||||
if not signal_price or signal_price <= 0:
|
||||
logging.warning(f"[{name}] Signal received with invalid or missing price ({signal_price}). Skipping.")
|
||||
continue
|
||||
|
||||
# --- This logic is still needed for copy_trader's nested config ---
|
||||
# --- But ONLY for finding leverage, not size ---
|
||||
if 'coins_to_copy' in params:
|
||||
logging.info(f"[{name}] Detected 'coins_to_copy'. Entering copy_trader logic...")
|
||||
matching_coin_key = None
|
||||
for key in params['coins_to_copy'].keys():
|
||||
if key.upper() == coin:
|
||||
matching_coin_key = key
|
||||
break
|
||||
|
||||
if matching_coin_key:
|
||||
coin_specific_config = params['coins_to_copy'][matching_coin_key]
|
||||
else:
|
||||
coin_specific_config = {}
|
||||
|
||||
# --- REMOVED: size = coin_specific_config.get('size') ---
|
||||
|
||||
params['leverage_long'] = coin_specific_config.get('leverage_long', 2)
|
||||
params['leverage_short'] = coin_specific_config.get('leverage_short', 2)
|
||||
|
||||
# --- FIX: Read the size from the ROOT of the trade signal ---
|
||||
size = trade_signal.get('size')
|
||||
if not size or size <= 0:
|
||||
logging.error(f"[{name}] Signal received with no 'size' or invalid size ({size}). Skipping trade.")
|
||||
continue
|
||||
# --- END FIX ---
|
||||
|
||||
leverage_long = int(params.get('leverage_long', 2))
|
||||
leverage_short = int(params.get('leverage_short', 2))
|
||||
|
||||
agent_name = (config.get("agent") or "default").lower()
|
||||
|
||||
logging.info(f"[{name}] Agent set to: {agent_name}")
|
||||
|
||||
# --- REMOVED: current_position check ---
|
||||
|
||||
# --- Use pure signal_price directly for the limit_px ---
|
||||
limit_px = signal_price
|
||||
logging.info(f"[{name}] Using pure signal price for limit_px: {limit_px}")
|
||||
|
||||
# --- NEW: Stateless Signal-to-Order Conversion ---
|
||||
|
||||
if desired_signal == "OPEN_LONG":
|
||||
logging.warning(f"[{name}] ACTION: Opening LONG for {coin}.")
|
||||
# --- REMOVED: Leverage update signal ---
|
||||
self.send_order(agent_name, "market_open", coin, True, size, limit_px=limit_px)
|
||||
log_trade(strategy=name, coin=coin, action="OPEN_LONG", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
elif desired_signal == "OPEN_SHORT":
|
||||
logging.warning(f"[{name}] ACTION: Opening SHORT for {coin}.")
|
||||
# --- REMOVED: Leverage update signal ---
|
||||
self.send_order(agent_name, "market_open", coin, False, size, limit_px=limit_px)
|
||||
log_trade(strategy=name, coin=coin, action="OPEN_SHORT", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
elif desired_signal == "CLOSE_LONG":
|
||||
logging.warning(f"[{name}] ACTION: Closing LONG position for {coin}.")
|
||||
# A "market_close" for a LONG is a SELL order
|
||||
self.send_order(agent_name, "market_close", coin, False, size, limit_px=limit_px)
|
||||
log_trade(strategy=name, coin=coin, action="CLOSE_LONG", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
elif desired_signal == "CLOSE_SHORT":
|
||||
logging.warning(f"[{name}] ACTION: Closing SHORT position for {coin}.")
|
||||
# A "market_close" for a SHORT is a BUY order
|
||||
self.send_order(agent_name, "market_close", coin, True, size, limit_px=limit_px)
|
||||
log_trade(strategy=name, coin=coin, action="CLOSE_SHORT", price=signal_price, size=size, signal=desired_signal)
|
||||
|
||||
# --- NEW: Handle leverage update signals ---
|
||||
elif desired_signal == "UPDATE_LEVERAGE_LONG":
|
||||
logging.warning(f"[{name}] ACTION: Updating LONG leverage for {coin} to {size}x")
|
||||
# 'size' field holds the leverage value for this signal
|
||||
self.send_order(agent_name, "update_leverage", coin, True, size)
|
||||
|
||||
elif desired_signal == "UPDATE_LEVERAGE_SHORT":
|
||||
logging.warning(f"[{name}] ACTION: Updating SHORT leverage for {coin} to {size}x")
|
||||
# 'size' field holds the leverage value for this signal
|
||||
self.send_order(agent_name, "update_leverage", coin, False, size)
|
||||
|
||||
else:
|
||||
logging.warning(f"[{name}] Received unknown signal '{desired_signal}'. No action taken.")
|
||||
|
||||
# --- REMOVED: _save_managed_positions() ---
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"An error occurred in the position manager loop: {e}", exc_info=True)
|
||||
time.sleep(1)
|
||||
|
||||
# This script is no longer run directly, but is called by main_app.py
|
||||
|
||||
159
position_monitor.py
Normal file
@ -0,0 +1,159 @@
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import json
|
||||
import argparse
|
||||
from datetime import datetime, timezone
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
from dotenv import load_dotenv
|
||||
import logging
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
# Load .env file
|
||||
load_dotenv()
|
||||
|
||||
class PositionMonitor:
|
||||
"""
|
||||
A standalone, read-only dashboard for monitoring all open perpetuals
|
||||
positions, spot balances, and their associated strategies.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str):
|
||||
setup_logging(log_level, 'PositionMonitor')
|
||||
|
||||
self.wallet_address = os.environ.get("MAIN_WALLET_ADDRESS")
|
||||
if not self.wallet_address:
|
||||
logging.error("MAIN_WALLET_ADDRESS not set in .env file. Cannot proceed.")
|
||||
sys.exit(1)
|
||||
|
||||
self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
|
||||
self.managed_positions_path = os.path.join("_data", "executor_managed_positions.json")
|
||||
self._lines_printed = 0
|
||||
|
||||
logging.info(f"Monitoring vault address: {self.wallet_address}")
|
||||
|
||||
def load_managed_positions(self) -> dict:
|
||||
"""Loads the state of which strategy manages which position."""
|
||||
if os.path.exists(self.managed_positions_path):
|
||||
try:
|
||||
with open(self.managed_positions_path, 'r') as f:
|
||||
# Create a reverse map: {coin: strategy_name}
|
||||
data = json.load(f)
|
||||
return {v['coin']: k for k, v in data.items()}
|
||||
except (IOError, json.JSONDecodeError):
|
||||
logging.warning("Could not read managed positions file.")
|
||||
return {}
|
||||
|
||||
def run(self):
|
||||
"""Main loop to continuously refresh the dashboard."""
|
||||
try:
|
||||
while True:
|
||||
self.display_dashboard()
|
||||
time.sleep(5) # Refresh every 5 seconds
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Position monitor stopped.")
|
||||
|
||||
def display_dashboard(self):
|
||||
"""Fetches all data and draws the dashboard without blinking."""
|
||||
if self._lines_printed > 0:
|
||||
print(f"\x1b[{self._lines_printed}A", end="")
|
||||
|
||||
output_lines = []
|
||||
try:
|
||||
perp_state = self.info.user_state(self.wallet_address)
|
||||
spot_state = self.info.spot_user_state(self.wallet_address)
|
||||
coin_to_strategy_map = self.load_managed_positions()
|
||||
|
||||
output_lines.append(f"--- Live Position Monitor for {self.wallet_address[:6]}...{self.wallet_address[-4:]} ---")
|
||||
|
||||
# --- 1. Perpetuals Account Summary ---
|
||||
margin_summary = perp_state.get('marginSummary', {})
|
||||
account_value = float(margin_summary.get('accountValue', 0))
|
||||
margin_used = float(margin_summary.get('totalMarginUsed', 0))
|
||||
utilization = (margin_used / account_value) * 100 if account_value > 0 else 0
|
||||
|
||||
output_lines.append("\n--- Perpetuals Account Summary ---")
|
||||
output_lines.append(f" Account Value: ${account_value:,.2f} | Margin Used: ${margin_used:,.2f} | Utilization: {utilization:.2f}%")
|
||||
|
||||
# --- 2. Spot Balances Summary ---
|
||||
output_lines.append("\n--- Spot Balances ---")
|
||||
spot_balances = spot_state.get('balances', [])
|
||||
if not spot_balances:
|
||||
output_lines.append(" No spot balances found.")
|
||||
else:
|
||||
balances_str = ", ".join([f"{b.get('coin')}: {float(b.get('total', 0)):,.4f}" for b in spot_balances if float(b.get('total', 0)) > 0])
|
||||
output_lines.append(f" {balances_str}")
|
||||
|
||||
# --- 3. Open Positions Table ---
|
||||
output_lines.append("\n--- Open Perpetual Positions ---")
|
||||
positions = perp_state.get('assetPositions', [])
|
||||
open_positions = [p for p in positions if p.get('position') and float(p['position'].get('szi', 0)) != 0]
|
||||
|
||||
if not open_positions:
|
||||
output_lines.append(" No open perpetual positions found.")
|
||||
output_lines.append("") # Add a line for stable refresh
|
||||
else:
|
||||
self.build_positions_table(open_positions, coin_to_strategy_map, output_lines)
|
||||
|
||||
except Exception as e:
|
||||
output_lines = [f"An error occurred: {e}"]
|
||||
|
||||
final_output = "\n".join(output_lines) + "\n\x1b[J" # \x1b[J clears to end of screen
|
||||
print(final_output, end="")
|
||||
|
||||
self._lines_printed = len(output_lines)
|
||||
sys.stdout.flush()
|
||||
|
||||
def build_positions_table(self, positions: list, coin_to_strategy_map: dict, output_lines: list):
|
||||
"""Builds the text for the positions summary table."""
|
||||
header = f"| {'Strategy':<25} | {'Coin':<6} | {'Side':<5} | {'Size':>15} | {'Entry Price':>12} | {'Mark Price':>12} | {'PNL':>15} | {'Leverage':>10} |"
|
||||
output_lines.append(header)
|
||||
output_lines.append("-" * len(header))
|
||||
|
||||
for position in positions:
|
||||
pos = position.get('position', {})
|
||||
coin = pos.get('coin', 'Unknown')
|
||||
size = float(pos.get('szi', 0))
|
||||
entry_px = float(pos.get('entryPx', 0))
|
||||
mark_px = float(pos.get('markPx', 0))
|
||||
unrealized_pnl = float(pos.get('unrealizedPnl', 0))
|
||||
|
||||
# Get leverage
|
||||
position_value = float(pos.get('positionValue', 0))
|
||||
margin_used = float(pos.get('marginUsed', 0))
|
||||
leverage = (position_value / margin_used) if margin_used > 0 else 0
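# e.g. a $5,000 position carried on $1,000 of margin is displayed as 5.0x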
|
||||
|
||||
side_text = "LONG" if size > 0 else "SHORT"
|
||||
pnl_sign = "+" if unrealized_pnl >= 0 else ""
|
||||
|
||||
# Find the strategy that owns this coin
|
||||
strategy_name = coin_to_strategy_map.get(coin, "Unmanaged")
|
||||
|
||||
# Format all values as strings
|
||||
strategy_str = f"{strategy_name:<25}"
|
||||
coin_str = f"{coin:<6}"
|
||||
side_str = f"{side_text:<5}"
|
||||
size_str = f"{size:>15.4f}"
|
||||
entry_str = f"${entry_px:>11,.2f}"
|
||||
mark_str = f"${mark_px:>11,.2f}"
|
||||
pnl_str = f"{pnl_sign}${unrealized_pnl:>14,.2f}"
|
||||
lev_str = f"{leverage:>9.1f}x"
|
||||
|
||||
output_lines.append(f"| {strategy_str} | {coin_str} | {side_str} | {size_str} | {entry_str} | {mark_str} | {pnl_str} | {lev_str} |")
|
||||
|
||||
output_lines.append("-" * len(header))
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Monitor a Hyperliquid wallet's positions in real-time.")
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
args = parser.parse_args()
|
||||
|
||||
monitor = PositionMonitor(log_level=args.log_level)
|
||||
monitor.run()
|
||||
BIN
requirements.txt
Normal file
Binary file not shown.
271
resampler.py
@ -5,15 +5,16 @@ import sys
|
||||
import sqlite3
|
||||
import pandas as pd
|
||||
import json
|
||||
from datetime import datetime, timezone
|
||||
from datetime import datetime, timezone, timedelta
|
||||
|
||||
# Assuming logging_utils.py is in the same directory
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class Resampler:
|
||||
"""
|
||||
Reads 1-minute candle data directly from the SQLite database, resamples
|
||||
it to various timeframes, and stores the results back in the database.
|
||||
Reads new 1-minute candle data from the SQLite database, resamples it to
|
||||
various timeframes, and upserts the new candles to the corresponding tables,
|
||||
preventing data duplication.
|
||||
"""
|
||||
|
||||
def __init__(self, log_level: str, coins: list, timeframes: dict):
|
||||
@ -31,13 +32,70 @@ class Resampler:
|
||||
'number_of_trades': 'sum'
|
||||
}
|
||||
self.resampling_status = self._load_existing_status()
|
||||
self.job_start_time = None
|
||||
self._ensure_tables_exist()
|
||||
|
||||
def _ensure_tables_exist(self):
|
||||
"""
|
||||
Ensures all resampled tables exist with a PRIMARY KEY on timestamp_ms.
|
||||
Attempts to migrate existing tables if the schema is incorrect.
|
||||
"""
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
for coin in self.coins_to_process:
|
||||
for tf_name in self.timeframes.keys():
|
||||
table_name = f"{coin}_{tf_name}"
|
||||
cursor = conn.cursor()
|
||||
cursor.execute(f"PRAGMA table_info('{table_name}')")
|
||||
columns = cursor.fetchall()
|
||||
if columns:
|
||||
# --- FIX: Check for the correct PRIMARY KEY on timestamp_ms ---
|
||||
pk_found = any(col[1] == 'timestamp_ms' and col[5] == 1 for col in columns)
|
||||
if not pk_found:
|
||||
logging.warning(f"Schema migration needed for table '{table_name}'.")
|
||||
try:
|
||||
conn.execute(f'ALTER TABLE "{table_name}" RENAME TO "{table_name}_old"')
|
||||
self._create_resampled_table(conn, table_name)
|
||||
# Copy data, ensuring to create the timestamp_ms
|
||||
logging.info(f" -> Migrating data for '{table_name}'...")
|
||||
old_df = pd.read_sql(f'SELECT * FROM "{table_name}_old"', conn, parse_dates=['datetime_utc'])
|
||||
if not old_df.empty:
|
||||
old_df['timestamp_ms'] = (old_df['datetime_utc'].astype('int64') // 10**6)
|
||||
# Keep only unique timestamps, preserving the last entry
|
||||
old_df.drop_duplicates(subset=['timestamp_ms'], keep='last', inplace=True)
|
||||
old_df.to_sql(table_name, conn, if_exists='append', index=False)
|
||||
logging.info(f" -> Data migration complete.")
|
||||
conn.execute(f'DROP TABLE "{table_name}_old"')
|
||||
conn.commit()
|
||||
logging.info(f"Successfully migrated schema for '{table_name}'.")
|
||||
except Exception as e:
|
||||
logging.error(f"FATAL: Migration for '{table_name}' failed: {e}. Please delete 'market_data.db' and restart.")
|
||||
sys.exit(1)
|
||||
else:
|
||||
self._create_resampled_table(conn, table_name)
|
||||
logging.info("All resampled table schemas verified.")
|
||||
|
||||
def _create_resampled_table(self, conn, table_name):
|
||||
"""Creates a new resampled table with the correct schema."""
|
||||
# --- FIX: Set PRIMARY KEY on timestamp_ms for performance and uniqueness ---
|
||||
conn.execute(f'''
|
||||
CREATE TABLE "{table_name}" (
|
||||
datetime_utc TEXT,
|
||||
timestamp_ms INTEGER PRIMARY KEY,
|
||||
open REAL,
|
||||
high REAL,
|
||||
low REAL,
|
||||
close REAL,
|
||||
volume REAL,
|
||||
number_of_trades INTEGER
|
||||
)
|
||||
''')
|
||||
|
||||
def _load_existing_status(self) -> dict:
|
||||
"""Loads the existing status file if it exists, otherwise returns an empty dict."""
|
||||
if os.path.exists(self.status_file_path):
|
||||
try:
|
||||
with open(self.status_file_path, 'r', encoding='utf-8') as f:
|
||||
logging.info(f"Loading existing status from '{self.status_file_path}'")
|
||||
logging.debug(f"Loading existing status from '{self.status_file_path}'")
|
||||
return json.load(f)
|
||||
except (IOError, json.JSONDecodeError) as e:
|
||||
logging.warning(f"Could not read existing status file. Starting fresh. Error: {e}")
|
||||
@ -47,78 +105,141 @@ class Resampler:
|
||||
"""
|
||||
Main execution function to process all configured coins and update the database.
|
||||
"""
|
||||
self.job_start_time = datetime.now(timezone.utc)
|
||||
logging.info(f"--- Resampling job started at {self.job_start_time.strftime('%Y-%m-%d %H:%M:%S %Z')} ---")
|
||||
|
||||
if '1m' in self.timeframes:
|
||||
logging.debug("Ignoring '1m' timeframe as it is the source resolution.")
|
||||
del self.timeframes['1m']
|
||||
|
||||
if not self.timeframes:
|
||||
logging.warning("No timeframes to process after filtering. Exiting job.")
|
||||
return
|
||||
|
||||
if not os.path.exists(self.db_path):
|
||||
logging.error(f"Database file '{self.db_path}' not found. "
|
||||
"Please run the data fetcher script first.")
|
||||
sys.exit(1)
|
||||
logging.error(f"Database file '{self.db_path}' not found.")
|
||||
return
|
||||
|
||||
with sqlite3.connect(self.db_path) as conn:
|
||||
conn.execute("PRAGMA journal_mode=WAL;")
|
||||
|
||||
logging.info(f"Processing {len(self.coins_to_process)} coins: {', '.join(self.coins_to_process)}")
|
||||
logging.debug(f"Processing {len(self.coins_to_process)} coins...")
|
||||
|
||||
for coin in self.coins_to_process:
|
||||
source_table_name = f"{coin}_1m"
|
||||
logging.info(f"--- Processing {coin} ---")
|
||||
logging.debug(f"--- Processing {coin} ---")
|
||||
|
||||
try:
|
||||
df = pd.read_sql(f'SELECT * FROM "{source_table_name}"', conn)
|
||||
|
||||
if df.empty:
|
||||
logging.warning(f"Source table '{source_table_name}' is empty or does not exist. Skipping.")
|
||||
continue
|
||||
|
||||
df['datetime_utc'] = pd.to_datetime(df['datetime_utc'])
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
|
||||
for tf_name, tf_code in self.timeframes.items():
|
||||
logging.info(f" Resampling to {tf_name}...")
|
||||
target_table_name = f"{coin}_{tf_name}"
|
||||
source_table_name = f"{coin}_1m"
|
||||
logging.debug(f" Updating {tf_name} table...")
|
||||
|
||||
resampled_df = df.resample(tf_code).agg(self.aggregation_logic)
|
||||
last_timestamp_ms = self._get_last_timestamp(conn, target_table_name)
|
||||
|
||||
query = f'SELECT * FROM "{source_table_name}"'
|
||||
params = ()
|
||||
if last_timestamp_ms:
|
||||
query += ' WHERE timestamp_ms >= ?'
|
||||
# Go back one interval to rebuild the last (potentially partial) candle
|
||||
try:
|
||||
interval_delta_ms = pd.to_timedelta(tf_code).total_seconds() * 1000
|
||||
except ValueError:
|
||||
# Fall back to a safe 32-day lookback for special timeframes
|
||||
interval_delta_ms = timedelta(days=32).total_seconds() * 1000
|
||||
|
||||
query_start_ms = last_timestamp_ms - interval_delta_ms
|
||||
params = (query_start_ms,)
|
||||
|
||||
df_1m = pd.read_sql(query, conn, params=params, parse_dates=['datetime_utc'])
|
||||
|
||||
if df_1m.empty:
|
||||
logging.debug(f" -> No new 1-minute data for {tf_name}. Table is up to date.")
|
||||
continue
|
||||
|
||||
df_1m.set_index('datetime_utc', inplace=True)
|
||||
resampled_df = df_1m.resample(tf_code).agg(self.aggregation_logic)
|
||||
resampled_df.dropna(how='all', inplace=True)
|
||||
|
||||
if coin not in self.resampling_status:
|
||||
self.resampling_status[coin] = {}
|
||||
|
||||
if not resampled_df.empty:
|
||||
target_table_name = f"{coin}_{tf_name}"
|
||||
resampled_df.to_sql(
|
||||
target_table_name,
|
||||
conn,
|
||||
if_exists='replace',
|
||||
index=True
|
||||
)
|
||||
|
||||
last_timestamp = resampled_df.index[-1].strftime('%Y-%m-%d %H:%M:%S')
|
||||
num_candles = len(resampled_df)
|
||||
records_to_upsert = []
|
||||
for index, row in resampled_df.iterrows():
|
||||
records_to_upsert.append((
|
||||
index.strftime('%Y-%m-%d %H:%M:%S'),
|
||||
int(index.timestamp() * 1000), # Generate timestamp_ms
|
||||
row['open'], row['high'], row['low'], row['close'],
|
||||
row['volume'], row['number_of_trades']
|
||||
))
|
||||
|
||||
cursor = conn.cursor()
|
||||
cursor.executemany(f'''
|
||||
INSERT OR REPLACE INTO "{target_table_name}" (datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades)
|
||||
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
||||
''', records_to_upsert)
|
||||
conn.commit()
|
||||
|
||||
logging.debug(f" -> Upserted {len(resampled_df)} candles into '{target_table_name}'.")
|
||||
|
||||
if coin not in self.resampling_status: self.resampling_status[coin] = {}
|
||||
total_candles = int(self._get_table_count(conn, target_table_name))
|
||||
self.resampling_status[coin][tf_name] = {
|
||||
"last_candle_utc": last_timestamp,
|
||||
"total_candles": num_candles
|
||||
}
|
||||
else:
|
||||
logging.info(f" -> No data to save for '{coin}_{tf_name}'.")
|
||||
self.resampling_status[coin][tf_name] = {
|
||||
"last_candle_utc": "N/A",
|
||||
"total_candles": 0
|
||||
"last_candle_utc": resampled_df.index[-1].strftime('%Y-%m-%d %H:%M:%S'),
|
||||
"total_candles": total_candles
|
||||
}
|
||||
|
||||
except pd.io.sql.DatabaseError as e:
|
||||
logging.warning(f"Could not read source table '{source_table_name}': {e}")
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to process coin '{coin}': {e}")
|
||||
|
||||
self._log_summary()
|
||||
self._save_status()
|
||||
logging.info("--- Resampling process complete ---")
|
||||
logging.info(f"--- Resampling job finished at {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S %Z')} ---")
|
||||
|
||||
def _log_summary(self):
|
||||
"""Logs a summary of the total candles for each timeframe."""
|
||||
logging.info("--- Resampling Job Summary ---")
|
||||
timeframe_totals = {}
|
||||
for coin, tfs in self.resampling_status.items():
|
||||
if not isinstance(tfs, dict): continue
|
||||
for tf_name, tf_data in tfs.items():
|
||||
total = tf_data.get("total_candles", 0)
|
||||
if tf_name not in timeframe_totals:
|
||||
timeframe_totals[tf_name] = 0
|
||||
timeframe_totals[tf_name] += total
|
||||
|
||||
if not timeframe_totals:
|
||||
logging.info("No candles were resampled in this run.")
|
||||
return
|
||||
|
||||
logging.info("Total candles per timeframe across all processed coins:")
|
||||
for tf_name, total in sorted(timeframe_totals.items()):
|
||||
logging.info(f" - {tf_name:<10}: {total:,} candles")
|
||||
|
||||
def _get_last_timestamp(self, conn, table_name):
|
||||
"""Gets the millisecond timestamp of the last entry in a table."""
|
||||
try:
|
||||
# --- FIX: Query for the integer timestamp_ms, not the text datetime_utc ---
|
||||
timestamp_ms = pd.read_sql(f'SELECT MAX(timestamp_ms) FROM "{table_name}"', conn).iloc[0, 0]
|
||||
return int(timestamp_ms) if pd.notna(timestamp_ms) else None
|
||||
except (pd.io.sql.DatabaseError, IndexError):
|
||||
return None
|
||||
|
||||
def _get_table_count(self, conn, table_name):
|
||||
"""Gets the total row count of a table."""
|
||||
try:
|
||||
return pd.read_sql(f'SELECT COUNT(*) FROM "{table_name}"', conn).iloc[0, 0]
|
||||
except (pd.io.sql.DatabaseError, IndexError):
|
||||
return 0
|
||||
|
||||
def _save_status(self):
|
||||
"""Saves the final resampling status to a JSON file."""
|
||||
if not self.resampling_status:
|
||||
logging.warning("No data was resampled, skipping status file creation.")
|
||||
return
|
||||
|
||||
self.resampling_status['last_completed_utc'] = datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
stop_time = datetime.now(timezone.utc)
|
||||
self.resampling_status['job_start_time_utc'] = self.job_start_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
self.resampling_status['job_stop_time_utc'] = stop_time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
|
||||
self.resampling_status.pop('last_completed_utc', None)
|
||||
|
||||
try:
|
||||
with open(self.status_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(self.resampling_status, f, indent=4, sort_keys=True)
|
||||
@ -132,58 +253,36 @@ def parse_timeframes(tf_strings: list) -> dict:
|
||||
tf_map = {}
|
||||
for tf_str in tf_strings:
|
||||
numeric_part = ''.join(filter(str.isdigit, tf_str))
|
||||
unit = ''.join(filter(str.isalpha, tf_str)).lower()
|
||||
unit = ''.join(filter(str.isalpha, tf_str)) # Keep case for 'M'
|
||||
|
||||
key = tf_str
|
||||
code = ''
|
||||
if unit == 'm':
|
||||
if unit == 'm':
|
||||
code = f"{numeric_part}min"
|
||||
elif unit == 'w':
|
||||
# --- FIX: Use uppercase 'W' for weeks to avoid deprecation warning ---
|
||||
code = f"{numeric_part}W"
|
||||
elif unit in ['h', 'd']:
|
||||
code = f"{numeric_part}{unit}"
|
||||
else:
|
||||
elif unit.lower() == 'w':
|
||||
code = f"{numeric_part}W-MON"
|
||||
elif unit == 'M':
|
||||
code = f"{numeric_part}MS"
|
||||
key = f"{numeric_part}month"
|
||||
elif unit.lower() in ['h', 'd']:
|
||||
code = f"{numeric_part}{unit.lower()}"
|
||||
else:
|
||||
code = tf_str
|
||||
logging.warning(f"Unrecognized timeframe unit in '{tf_str}'. Using as-is.")
|
||||
|
||||
tf_map[tf_str] = code
|
||||
tf_map[key] = code
|
||||
return tf_map
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Resample 1-minute candle data from SQLite to other timeframes.")
|
||||
parser.add_argument(
|
||||
"--coins",
|
||||
nargs='+',
|
||||
default=["BTC", "ETH", "SOL", "BNB", "HYPE", "ASTER", "ZEC", "PUMP", "SUI"],
|
||||
help="List of coins to process."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--timeframes",
|
||||
nargs='+',
|
||||
default=['4m', '5m', '15m', '30m', '37m', '148m', '4h', '12h', '1d', '1w'],
|
||||
help="List of timeframes to generate (e.g., 5m 1h 1d)."
|
||||
)
|
||||
parser.add_argument(
|
||||
"--timeframe",
|
||||
dest="timeframes",
|
||||
nargs='+',
|
||||
help=argparse.SUPPRESS
|
||||
)
|
||||
parser.add_argument(
|
||||
"--log-level",
|
||||
default="normal",
|
||||
choices=['off', 'normal', 'debug'],
|
||||
help="Set the logging level for the script."
|
||||
)
|
||||
parser.add_argument("--coins", nargs='+', required=True, help="List of coins to process.")
|
||||
parser.add_argument("--timeframes", nargs='+', required=True, help="List of timeframes to generate.")
|
||||
parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
|
||||
args = parser.parse_args()
|
||||
|
||||
timeframes_dict = parse_timeframes(args.timeframes)
|
||||
|
||||
resampler = Resampler(
|
||||
log_level=args.log_level,
|
||||
coins=args.coins,
|
||||
timeframes=timeframes_dict
|
||||
)
|
||||
resampler = Resampler(log_level=args.log_level, coins=args.coins, timeframes=timeframes_dict)
|
||||
resampler.run()
|
||||
|
||||
|
||||
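
The incremental update performed by `run()` above boils down to a short pattern: find the newest stored candle, re-read 1-minute data from one interval before it, resample, and upsert. The following is a condensed sketch of that pattern, not the script itself; it assumes the table schema created by `_create_resampled_table`, and the helper name is illustrative.

```python
import sqlite3
import pandas as pd

AGG = {'open': 'first', 'high': 'max', 'low': 'min', 'close': 'last',
       'volume': 'sum', 'number_of_trades': 'sum'}

def incremental_resample(conn: sqlite3.Connection, coin: str, tf_name: str, tf_code: str):
    """Rebuild only the candles that can have changed since the last run (illustrative sketch)."""
    target = f"{coin}_{tf_name}"
    last_ms = pd.read_sql(f'SELECT MAX(timestamp_ms) FROM "{target}"', conn).iloc[0, 0]

    query, params = f'SELECT * FROM "{coin}_1m"', ()
    if pd.notna(last_ms):
        # Look back one full interval so the last (possibly partial) candle is rebuilt.
        # (Weekly/monthly codes need the wider fallback used in the script above.)
        lookback_ms = pd.to_timedelta(tf_code).total_seconds() * 1000
        query, params = query + ' WHERE timestamp_ms >= ?', (int(last_ms) - lookback_ms,)

    df_1m = pd.read_sql(query, conn, params=params, parse_dates=['datetime_utc'])
    if df_1m.empty:
        return
    resampled = df_1m.set_index('datetime_utc').resample(tf_code).agg(AGG).dropna(how='all')

    rows = [(ts.strftime('%Y-%m-%d %H:%M:%S'), int(ts.timestamp() * 1000),
             r['open'], r['high'], r['low'], r['close'], r['volume'], r['number_of_trades'])
            for ts, r in resampled.iterrows()]
    conn.executemany(
        f'INSERT OR REPLACE INTO "{target}" '
        '(datetime_utc, timestamp_ms, open, high, low, close, volume, number_of_trades) '
        'VALUES (?, ?, ?, ?, ?, ?, ?, ?)', rows)
    conn.commit()
```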
79
review.md
Normal file
@ -0,0 +1,79 @@
# Project Review and Recommendations

This review provides an analysis of the current state of the automated trading bot project, proposes specific code improvements, and identifies files that appear to be unused or are one-off utilities that could be reorganized.

The project is a well-structured, multi-process Python application for crypto trading. It has a clear separation of concerns between data fetching, strategy execution, and trade management. The use of `multiprocessing` and a centralized `main_app.py` orchestrator is a solid architectural choice.

The following sections detail recommendations for improving configuration management, code structure, and robustness, along with a list of files recommended for cleanup.

---

## Proposed Code Changes

### 1. Centralize Configuration

- **Issue:** Key configuration variables like `WATCHED_COINS` and `required_timeframes` are hardcoded in `main_app.py`. This makes them difficult to change without modifying the source code.
- **Proposal:**
  - Create a central configuration file, e.g., `_data/config.json`, as sketched below.
  - Move `WATCHED_COINS` and `required_timeframes` into this new file.
  - Load this configuration in `main_app.py` at startup.
- **Benefit:** Decouples configuration from code, making the application more flexible and easier to manage.
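
A minimal sketch of what this could look like. The file path `_data/config.json` comes from the proposal above; the key names and the loader function below are illustrative assumptions, not existing code.

```python
# Hypothetical _data/config.json:
# {
#     "watched_coins": ["BTC", "ETH", "SOL"],
#     "required_timeframes": ["5m", "15m", "4h", "1d"]
# }
import json
import os

def load_app_config(path: str = os.path.join("_data", "config.json")) -> dict:
    """Load the central app config, falling back to safe defaults if the file is missing or invalid."""
    defaults = {"watched_coins": ["BTC", "ETH"], "required_timeframes": ["1d"]}
    try:
        with open(path, "r", encoding="utf-8") as f:
            return {**defaults, **json.load(f)}
    except (FileNotFoundError, json.JSONDecodeError):
        return defaults

# In main_app.py at startup (illustrative):
# config = load_app_config()
# WATCHED_COINS = config["watched_coins"]
# required_timeframes = config["required_timeframes"]
```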

### 2. Refactor `main_app.py` for Clarity

- **Issue:** `main_app.py` is long and handles multiple responsibilities: process orchestration, dashboard rendering, and data reading.
- **Proposal:**
  - **Abstract Process Management:** The functions for running subprocesses (e.g., `run_live_candle_fetcher`, `run_resampler_job`) contain repetitive logic for logging, shutdown handling, and process looping. This could be abstracted into a generic `ProcessRunner` class (see the sketch below).
  - **Create a Dashboard Class:** The complex dashboard rendering logic could be moved into a separate `Dashboard` class to improve separation of concerns and make the main application loop cleaner.
- **Benefit:** Improves code readability, reduces duplication, and makes the application easier to maintain and extend.
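
A rough sketch of what such a class might look like; the constructor signature and restart policy are assumptions for illustration, not code that exists in the project.

```python
import logging
import multiprocessing
import time

class ProcessRunner:
    """Wraps a worker function in a supervised child process with restart-on-crash (illustrative)."""

    def __init__(self, name: str, target, args: tuple = (), restart_delay: int = 5):
        self.name = name
        self.target = target
        self.args = args
        self.restart_delay = restart_delay
        self.process = None

    def start(self):
        self.process = multiprocessing.Process(target=self.target, args=self.args,
                                               name=self.name, daemon=True)
        self.process.start()
        logging.info(f"Started process '{self.name}' (pid={self.process.pid})")

    def ensure_alive(self):
        """Call periodically from the orchestrator loop; restarts the worker if it died."""
        if self.process is not None and not self.process.is_alive():
            logging.warning(f"Process '{self.name}' died, restarting in {self.restart_delay}s...")
            time.sleep(self.restart_delay)
            self.start()

    def stop(self):
        if self.process is not None and self.process.is_alive():
            self.process.terminate()
            self.process.join(timeout=10)
```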

### 3. Improve Project Structure

- **Issue:** The root directory is cluttered with numerous Python scripts, making it difficult to distinguish between core application files, utility scripts, and old/example files.
- **Proposal:**
  - Create a `scripts/` directory and move all one-off utility and maintenance scripts into it.
  - Consider creating a `src/` or `app/` directory to house the core application source code (`main_app.py`, `trade_executor.py`, etc.), separating it clearly from configuration, data, and documentation.
- **Benefit:** A cleaner, more organized project structure that is easier for new developers to understand.

### 4. Enhance Robustness and Error Handling

- **Issue:** The agent loading in `trade_executor.py` relies on discovering environment variables by a naming convention (`_AGENT_PK`). This is clever but can be brittle if environment variables are named incorrectly.
- **Proposal:**
  - Explicitly define the agent names and their corresponding environment variable keys in the proposed `_data/config.json` file (see the sketch below). The `trade_executor` would then load only the agents specified in the configuration.
- **Benefit:** Makes agent configuration more explicit and less prone to errors from stray environment variables.
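
One way this could look, assuming an `agents` section is added to the proposed `_data/config.json`. The agent names and env-var keys below are illustrative; the env-var names simply follow the `_AGENT_PK` convention mentioned in the issue above.

```python
# Hypothetical "agents" section in _data/config.json:
# {
#     "agents": {
#         "scalper": "SCALPER_AGENT_PK",
#         "swing": "SWING_AGENT_PK"
#     }
# }
import logging
import os

def load_agent_keys(config: dict) -> dict:
    """Return {agent_name: private_key} for the agents explicitly listed in the config."""
    keys = {}
    for agent_name, env_var in config.get("agents", {}).items():
        private_key = os.environ.get(env_var)
        if private_key:
            keys[agent_name] = private_key
        else:
            logging.warning(f"Agent '{agent_name}' is configured but env var '{env_var}' is not set.")
    return keys
```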

---

## Identified Unused/Utility Files

The following files were identified as likely being unused by the core application, being obsolete, or serving as one-off utilities. It is recommended to **move them to a `scripts/` directory** or **delete them** if they are obsolete.

### Obsolete / Old Versions:
- `data_fetcher_old.py`
- `market_old.py`
- `base_strategy.py` (The one in the root directory; the one in `strategies/` is used).

### One-Off Utility Scripts (Recommend moving to `scripts/`):
- `!migrate_to_sqlite.py`
- `import_csv.py`
- `del_market_cap_tables.py`
- `fix_timestamps.py`
- `list_coins.py`
- `create_agent.py`

### Examples / Unused Code:
- `basic_ws.py` (Appears to be an example file).
- `backtester.py`
- `strategy_sma_cross.py` (A strategy file in the root, not in the `strategies` folder).
- `strategy_template.py`

### Standalone / Potentially Unused Core Files:
The following files seem to have their logic already integrated into the main multi-process application. They might be remnants of a previous architecture and may not be needed as standalone scripts.
- `address_monitor.py`
- `position_monitor.py`
- `trade_log.py`
- `wallet_data.py`
- `whale_tracker.py`

### Data / Log Files (Recommend archiving or deleting):
- `hyperliquid_wallet_data_*.json` (These appear to be backups or logs).
Submodule sdk/hyperliquid-python-sdk deleted from 64b252e99d
166
strategies/base_strategy.py
Normal file
@ -0,0 +1,166 @@
|
||||
from abc import ABC, abstractmethod
|
||||
import pandas as pd
|
||||
import json
|
||||
import os
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
import sqlite3
|
||||
import multiprocessing
|
||||
import time
|
||||
|
||||
from logging_utils import setup_logging
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
|
||||
class BaseStrategy(ABC):
|
||||
"""
|
||||
An abstract base class that defines the blueprint for all trading strategies.
|
||||
It provides common functionality like loading data, saving status, and state management.
|
||||
"""
|
||||
|
||||
def __init__(self, strategy_name: str, params: dict, trade_signal_queue: multiprocessing.Queue = None, shared_status: dict = None):
|
||||
self.strategy_name = strategy_name
|
||||
self.params = params
|
||||
self.trade_signal_queue = trade_signal_queue
|
||||
# Optional multiprocessing.Manager().dict() to hold live status (avoids file IO)
|
||||
self.shared_status = shared_status
|
||||
|
||||
self.coin = params.get("coin", "N/A")
|
||||
self.timeframe = params.get("timeframe", "N/A")
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
|
||||
|
||||
self.current_signal = "INIT"
|
||||
self.last_signal_change_utc = None
|
||||
self.signal_price = None
|
||||
|
||||
# Note: Logging is set up by the run_strategy function
|
||||
|
||||
def load_data(self) -> pd.DataFrame:
|
||||
"""Loads historical data for the configured coin and timeframe."""
|
||||
table_name = f"{self.coin}_{self.timeframe}"
|
||||
|
||||
periods = [v for k, v in self.params.items() if 'period' in k or '_ma' in k or 'slow' in k or 'fast' in k]
|
||||
limit = max(periods) + 50 if periods else 500
|
||||
|
||||
try:
|
||||
with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
|
||||
query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
|
||||
df = pd.read_sql(query, conn, parse_dates=['datetime_utc'])
|
||||
if df.empty: return pd.DataFrame()
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
df.sort_index(inplace=True)
|
||||
return df
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load data from table '{table_name}': {e}")
|
||||
return pd.DataFrame()
|
||||
|
||||
@abstractmethod
|
||||
def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
|
||||
"""The core logic of the strategy. Must be implemented by child classes."""
|
||||
pass
|
||||
|
||||
def calculate_signals_and_state(self, df: pd.DataFrame) -> bool:
|
||||
"""
|
||||
A wrapper that calls the strategy's signal calculation, determines
|
||||
the last signal change, and returns True if the signal has changed.
|
||||
"""
|
||||
df_with_signals = self.calculate_signals(df)
|
||||
df_with_signals.dropna(inplace=True)
|
||||
if df_with_signals.empty:
|
||||
return False
|
||||
|
||||
df_with_signals['position_change'] = df_with_signals['signal'].diff()
|
||||
|
||||
last_signal_int = df_with_signals['signal'].iloc[-1]
|
||||
new_signal_str = "HOLD"
|
||||
if last_signal_int == 1: new_signal_str = "BUY"
|
||||
elif last_signal_int == -1: new_signal_str = "SELL"
|
||||
|
||||
signal_changed = False
|
||||
if self.current_signal == "INIT":
|
||||
if new_signal_str == "BUY": self.current_signal = "INIT_BUY"
|
||||
elif new_signal_str == "SELL": self.current_signal = "INIT_SELL"
|
||||
else: self.current_signal = "HOLD"
|
||||
signal_changed = True
|
||||
elif new_signal_str != self.current_signal:
|
||||
self.current_signal = new_signal_str
|
||||
signal_changed = True
|
||||
|
||||
if signal_changed:
|
||||
last_change_series = df_with_signals[df_with_signals['position_change'] != 0]
|
||||
if not last_change_series.empty:
|
||||
last_change_row = last_change_series.iloc[-1]
|
||||
self.last_signal_change_utc = last_change_row.name.tz_localize('UTC').isoformat()
|
||||
self.signal_price = last_change_row['close']
|
||||
|
||||
return signal_changed
|
||||
|
||||
def _save_status(self):
|
||||
"""Saves the current strategy state to its JSON file."""
|
||||
status = {
|
||||
"strategy_name": self.strategy_name,
|
||||
"current_signal": self.current_signal,
|
||||
"last_signal_change_utc": self.last_signal_change_utc,
|
||||
"signal_price": self.signal_price,
|
||||
"last_checked_utc": datetime.now(timezone.utc).isoformat()
|
||||
}
|
||||
# If a shared status dict is provided (Manager.dict()), update it instead of writing files
|
||||
try:
|
||||
if self.shared_status is not None:
|
||||
try:
|
||||
# store the status under the strategy name for easy lookup
|
||||
self.shared_status[self.strategy_name] = status
|
||||
except Exception:
|
||||
# Manager proxies may not accept nested mutable objects consistently; assign a copy
|
||||
self.shared_status[self.strategy_name] = dict(status)
|
||||
else:
|
||||
with open(self.status_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(status, f, indent=4)
|
||||
except IOError as e:
|
||||
logging.error(f"Failed to write status file for {self.strategy_name}: {e}")
|
||||
|
||||
def run_polling_loop(self):
|
||||
"""
|
||||
The default execution loop for polling-based strategies (e.g., SMAs).
|
||||
"""
|
||||
while True:
|
||||
df = self.load_data()
|
||||
if df.empty:
|
||||
logging.warning("No data loaded. Waiting 1 minute...")
|
||||
time.sleep(60)
|
||||
continue
|
||||
|
||||
signal_changed = self.calculate_signals_and_state(df.copy())
|
||||
self._save_status()
|
||||
|
||||
if signal_changed or self.current_signal == "INIT_BUY" or self.current_signal == "INIT_SELL":
|
||||
logging.warning(f"New signal detected: {self.current_signal}")
|
||||
self.trade_signal_queue.put({
|
||||
"strategy_name": self.strategy_name,
|
||||
"signal": self.current_signal,
|
||||
"coin": self.coin,
|
||||
"signal_price": self.signal_price,
|
||||
"config": {"agent": self.params.get("agent"), "parameters": self.params}
|
||||
})
|
||||
if self.current_signal == "INIT_BUY": self.current_signal = "BUY"
|
||||
if self.current_signal == "INIT_SELL": self.current_signal = "SELL"
|
||||
|
||||
logging.info(f"Current Signal: {self.current_signal}")
|
||||
time.sleep(60)
|
||||
|
||||
def run_event_loop(self):
|
||||
"""
|
||||
A placeholder for event-driven (WebSocket) strategies.
|
||||
Child classes must override this.
|
||||
"""
|
||||
logging.error("run_event_loop() is not implemented for this strategy.")
|
||||
time.sleep(3600) # Sleep for an hour to prevent rapid error loops
|
||||
|
||||
def on_fill_message(self, message):
|
||||
"""
|
||||
Placeholder for the WebSocket callback.
|
||||
Child classes must override this.
|
||||
"""
|
||||
pass
|
||||
|
||||
353
strategies/copy_trader_strategy.py
Normal file
@ -0,0 +1,353 @@
|
||||
import logging
|
||||
import time
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timezone
|
||||
from hyperliquid.info import Info
|
||||
from hyperliquid.utils import constants
|
||||
|
||||
from strategies.base_strategy import BaseStrategy
|
||||
|
||||
class CopyTraderStrategy(BaseStrategy):
|
||||
"""
|
||||
An event-driven strategy that monitors a target wallet address and
|
||||
copies its trades for a specific set of allowed coins.
|
||||
|
||||
This strategy is STATELESS. It translates a target's fill direction
|
||||
(e.g., "Open Long") directly into an explicit signal
|
||||
(e.g., "OPEN_LONG") for the PositionManager.
|
||||
"""
|
||||
def __init__(self, strategy_name: str, params: dict, trade_signal_queue, shared_status: dict = None):
|
||||
# --- MODIFIED: Pass the correct queue to the parent ---
|
||||
# The event-driven copy trader should send orders to the order_execution_queue
|
||||
# We will assume the queue passed in is the correct one (as setup in main_app.py)
|
||||
super().__init__(strategy_name, params, trade_signal_queue, shared_status)
|
||||
|
||||
self.target_address = self.params.get("target_address", "").lower()
|
||||
self.coins_to_copy = self.params.get("coins_to_copy", {})
|
||||
# Convert all coin keys to uppercase for consistency
|
||||
self.coins_to_copy = {k.upper(): v for k, v in self.coins_to_copy.items()}
|
||||
self.allowed_coins = list(self.coins_to_copy.keys())
|
||||
|
||||
if not self.target_address:
|
||||
logging.error("No 'target_address' specified in parameters for copy trader.")
|
||||
raise ValueError("target_address is required")
|
||||
if not self.allowed_coins:
|
||||
logging.warning("No 'coins_to_copy' configured. This strategy will not copy any trades.")
|
||||
|
||||
self.info = None # Will be initialized in the run loop
|
||||
|
||||
# --- REMOVED: All local state management ---
|
||||
# self.position_state_file = ...
|
||||
# self.current_positions = ...
|
||||
|
||||
# --- MODIFIED: Check if shared_status is None before using it ---
|
||||
if self.shared_status is None:
|
||||
logging.warning("No shared_status dictionary provided. Initializing a new one.")
|
||||
self.shared_status = {}
|
||||
|
||||
self.current_signal = self.shared_status.get("current_signal", "WAIT")
|
||||
self.signal_price = self.shared_status.get("signal_price")
|
||||
self.last_signal_change_utc = self.shared_status.get("last_signal_change_utc")
|
||||
|
||||
self.start_time_utc = datetime.now(timezone.utc)
|
||||
logging.info(f"Strategy initialized. Ignoring all trades before {self.start_time_utc.isoformat()}")
|
||||
|
||||
# --- REMOVED: _load_position_state ---
|
||||
# --- REMOVED: _save_position_state ---
|
||||
|
||||
def calculate_signals(self, df):
|
||||
# This strategy is event-driven, so it does not use polling-based signal calculation.
|
||||
pass
|
||||
|
||||
def send_explicit_signal(self, signal: str, coin: str, price: float, trade_params: dict, size: float):
|
||||
"""Helper to send a formatted signal to the PositionManager."""
|
||||
config = {
|
||||
# --- MODIFIED: Ensure agent is read from params ---
|
||||
"agent": self.params.get("agent"),
|
||||
"parameters": trade_params
|
||||
}
|
||||
|
||||
# --- MODIFIED: Use self.trade_signal_queue (which is the queue passed in) ---
|
||||
self.trade_signal_queue.put({
|
||||
"strategy_name": self.strategy_name,
|
||||
"signal": signal, # e.g., "OPEN_LONG", "CLOSE_SHORT"
|
||||
"coin": coin,
|
||||
"signal_price": price,
|
||||
"config": config,
|
||||
"size": size # Explicitly pass size (or leverage for leverage updates)
|
||||
})
|
||||
logging.info(f"Explicit signal SENT: {signal} {coin} @ {price}, Size: {size}")
|
||||
|
||||
def on_fill_message(self, message):
|
||||
"""
|
||||
This is the callback function that gets triggered by the WebSocket
|
||||
every time the monitored address has an event.
|
||||
"""
|
||||
try:
|
||||
# --- NEW: Add logging to see ALL messages ---
|
||||
logging.debug(f"Received WebSocket message: {message}")
|
||||
|
||||
channel = message.get("channel")
|
||||
if channel not in ("user", "userFills", "userEvents"):
|
||||
# --- NEW: Added debug logging ---
|
||||
logging.debug(f"Ignoring message from unhandled channel: {channel}")
|
||||
return
|
||||
|
||||
data = message.get("data")
|
||||
if not data:
|
||||
# --- NEW: Added debug logging ---
|
||||
logging.debug("Message received with no 'data' field. Ignoring.")
|
||||
return
|
||||
|
||||
# --- NEW: Check for user address FIRST ---
|
||||
user_address = data.get("user", "").lower()
|
||||
if not user_address:
|
||||
logging.debug("Received message with 'data' but no 'user'. Ignoring.")
|
||||
return
|
||||
|
||||
# --- MODIFIED: Check for 'fills' vs. other event types ---
|
||||
# This check is still valid for userFills
|
||||
if "fills" not in data or not data.get("fills"):
|
||||
# This is a userEvent, but not a fill (e.g., order placement, cancel, withdrawal)
|
||||
event_type = data.get("type") # e.g., 'order', 'cancel', 'withdrawal'
|
||||
if event_type:
|
||||
logging.debug(f"Received non-fill user event: '{event_type}'. Ignoring.")
|
||||
else:
|
||||
logging.debug(f"Received 'data' message with no 'fills'. Ignoring.")
|
||||
return
|
||||
|
||||
# --- This line is now safe to run ---
|
||||
if user_address != self.target_address:
|
||||
# This shouldn't happen if the subscription is correct, but good to check
|
||||
logging.warning(f"Received fill for wrong user: {user_address}")
|
||||
return
|
||||
|
||||
fills = data.get("fills")
|
||||
logging.debug(f"Received {len(fills)} fill(s) for user {user_address}")
|
||||
|
||||
for fill in fills:
|
||||
# Check if the trade is new or historical
|
||||
trade_time = datetime.fromtimestamp(fill['time'] / 1000, tz=timezone.utc)
|
||||
if trade_time < self.start_time_utc:
|
||||
logging.info(f"Ignoring stale/historical trade from {trade_time.isoformat()}")
|
||||
continue
|
||||
|
||||
coin = fill.get('coin').upper()
|
||||
|
||||
if coin in self.allowed_coins:
|
||||
price = float(fill.get('px'))
|
||||
|
||||
# --- MODIFIED: Use the target's fill size ---
|
||||
fill_size = float(fill.get('sz')) # Target's size
|
||||
|
||||
if fill_size == 0:
|
||||
logging.warning(f"Ignoring fill with size 0.")
|
||||
continue
|
||||
|
||||
# --- NEW: Get the fill direction ---
|
||||
# "dir": "Open Long", "Close Long", "Open Short", "Close Short"
|
||||
fill_direction = fill.get("dir")
|
||||
|
||||
# --- NEW: Get startPosition to calculate flip sizes ---
|
||||
start_pos_size = float(fill.get('startPosition', 0.0))
|
||||
|
||||
if not fill_direction:
|
||||
logging.warning(f"Fill message missing 'dir'. Ignoring fill: {fill}")
|
||||
continue
|
||||
|
||||
# Get our strategy's configured leverage for this coin
|
||||
coin_config = self.coins_to_copy.get(coin)
|
||||
|
||||
# --- REMOVED: Check for coin_config.get("size") ---
|
||||
# --- REMOVED: strategy_trade_size = coin_config.get("size") ---
|
||||
|
||||
# Prepare config for the signal
|
||||
trade_params = self.params.copy()
|
||||
if coin_config:
|
||||
trade_params.update(coin_config)
|
||||
|
||||
# --- REMOVED: All stateful logic (current_local_pos, etc.) ---
|
||||
|
||||
# --- MODIFIED: Expanded logic to handle flip directions ---
|
||||
signal_sent = False
|
||||
dashboard_signal = ""
|
||||
|
||||
if fill_direction == "Open Long":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: OPEN_LONG")
|
||||
self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, fill_size)
|
||||
signal_sent = True
|
||||
dashboard_signal = "OPEN_LONG"
|
||||
|
||||
elif fill_direction == "Close Long":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: CLOSE_LONG")
|
||||
self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, fill_size)
|
||||
signal_sent = True
|
||||
dashboard_signal = "CLOSE_LONG"
|
||||
|
||||
elif fill_direction == "Open Short":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: OPEN_SHORT")
|
||||
self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, fill_size)
|
||||
signal_sent = True
|
||||
dashboard_signal = "OPEN_SHORT"
|
||||
|
||||
elif fill_direction == "Close Short":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending signal: CLOSE_SHORT")
|
||||
self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, fill_size)
|
||||
signal_sent = True
|
||||
dashboard_signal = "CLOSE_SHORT"
|
||||
|
||||
elif fill_direction == "Short > Long":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending CLOSE_SHORT then OPEN_LONG.")
|
||||
close_size = abs(start_pos_size)
|
||||
open_size = fill_size - close_size
|
||||
|
||||
if close_size > 0:
|
||||
self.send_explicit_signal("CLOSE_SHORT", coin, price, trade_params, close_size)
|
||||
|
||||
if open_size > 0:
|
||||
self.send_explicit_signal("OPEN_LONG", coin, price, trade_params, open_size)
|
||||
|
||||
signal_sent = True
|
||||
dashboard_signal = "FLIP_TO_LONG"
|
||||
|
||||
elif fill_direction == "Long > Short":
|
||||
logging.warning(f"[{coin}] Target action: {fill_direction}. Sending CLOSE_LONG then OPEN_SHORT.")
|
||||
close_size = abs(start_pos_size)
|
||||
open_size = fill_size - close_size
|
||||
|
||||
if close_size > 0:
|
||||
self.send_explicit_signal("CLOSE_LONG", coin, price, trade_params, close_size)
|
||||
|
||||
if open_size > 0:
|
||||
self.send_explicit_signal("OPEN_SHORT", coin, price, trade_params, open_size)
|
||||
|
||||
signal_sent = True
|
||||
dashboard_signal = "FLIP_TO_SHORT"
|
||||
|
||||
|
||||
if signal_sent:
|
||||
# Update dashboard status
|
||||
self.current_signal = dashboard_signal # Show the action
|
||||
self.signal_price = price
|
||||
self.last_signal_change_utc = trade_time.isoformat()
|
||||
self.coin = coin # Update coin for dashboard
|
||||
self.size = fill_size # Update size for dashboard
|
||||
self._save_status() # For dashboard
|
||||
|
||||
logging.info(f"Source trade logged: {json.dumps(fill)}")
|
||||
else:
|
||||
logging.info(f"[{coin}] Ignoring unhandled fill direction: {fill_direction}")
|
||||
else:
|
||||
logging.info(f"Ignoring fill for unmonitored coin: {coin}")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Error in on_fill_message: {e}", exc_info=True)
|
||||
|
||||
def _connect_and_subscribe(self):
|
||||
"""
|
||||
Establishes a new WebSocket connection and subscribes to the userFills channel.
|
||||
"""
|
||||
try:
|
||||
logging.info("Connecting to Hyperliquid WebSocket...")
|
||||
self.info = Info(constants.MAINNET_API_URL, skip_ws=False)
|
||||
|
||||
# --- MODIFIED: Reverted to 'userFills' as requested ---
|
||||
subscription = {"type": "userFills", "user": self.target_address}
|
||||
self.info.subscribe(subscription, self.on_fill_message)
|
||||
logging.info(f"Subscribed to 'userFills' for target address: {self.target_address}")
|
||||
|
||||
return True
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to connect or subscribe: {e}")
|
||||
self.info = None
|
||||
return False
|
||||
|
||||
def run_event_loop(self):
|
||||
"""
|
||||
This method overrides the default polling loop. It establishes a
|
||||
persistent WebSocket connection and runs a watchdog to ensure
|
||||
it stays connected.
|
||||
"""
|
||||
try:
|
||||
if not self._connect_and_subscribe():
|
||||
# If connection fails on start, wait 60s before letting the process restart
|
||||
time.sleep(60)
|
||||
return
|
||||
|
||||
# --- MODIFIED: Add a small delay to ensure Info object is ready for REST calls ---
|
||||
logging.info("Connection established. Waiting 2 seconds for Info client to be ready...")
|
||||
time.sleep(2)
|
||||
# --- END MODIFICATION ---
|
||||
|
||||
# --- NEW: Set initial leverage for all monitored coins ---
|
||||
logging.info("Setting initial leverage for all monitored coins...")
|
||||
try:
|
||||
all_mids = self.info.all_mids()
|
||||
for coin_key, coin_config in self.coins_to_copy.items():
|
||||
coin = coin_key.upper()
|
||||
# Use a failsafe price of 1.0 if coin not in mids (e.g., new listing)
|
||||
current_price = float(all_mids.get(coin, 1.0))
|
||||
|
||||
leverage_long = coin_config.get('leverage_long', 2)
|
||||
leverage_short = coin_config.get('leverage_short', 2)
|
||||
|
||||
# Prepare config for the signal
|
||||
trade_params = self.params.copy()
|
||||
trade_params.update(coin_config)
|
||||
|
||||
# Send LONG leverage update
|
||||
# The 'size' param is used to pass the leverage value for this signal type
|
||||
self.send_explicit_signal("UPDATE_LEVERAGE_LONG", coin, current_price, trade_params, leverage_long)
|
||||
|
||||
# Send SHORT leverage update
|
||||
self.send_explicit_signal("UPDATE_LEVERAGE_SHORT", coin, current_price, trade_params, leverage_short)
|
||||
|
||||
logging.info(f"Sent initial leverage signals for {coin} (Long: {leverage_long}x, Short: {leverage_short}x)")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to set initial leverage: {e}", exc_info=True)
|
||||
# --- END NEW LEVERAGE LOGIC ---
|
||||
|
||||
# Save the initial "WAIT" status
|
||||
self._save_status()
|
||||
|
||||
while True:
|
||||
try:
|
||||
time.sleep(15) # Check the connection every 15 seconds
|
||||
|
||||
if self.info is None or not self.info.ws_manager.is_alive():
|
||||
logging.error(f"WebSocket connection lost. Attempting to reconnect...")
|
||||
|
||||
if self.info and self.info.ws_manager:
|
||||
try:
|
||||
self.info.ws_manager.stop()
|
||||
except Exception as e:
|
||||
logging.error(f"Error stopping old ws_manager: {e}")
|
||||
|
||||
if not self._connect_and_subscribe():
|
||||
logging.error("Reconnect failed, will retry in 15s.")
|
||||
else:
|
||||
logging.info("Successfully reconnected to WebSocket.")
|
||||
self._save_status()
|
||||
else:
|
||||
logging.debug("Watchdog check: WebSocket connection is active.")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"An error occurred in the watchdog loop: {e}", exc_info=True)
|
||||
|
||||
except KeyboardInterrupt:
|
||||
# --- MODIFIED: No positions to close, just exit ---
|
||||
logging.warning(f"Shutdown signal received. Exiting strategy '{self.strategy_name}'.")
|
||||
|
||||
except Exception as e:
|
||||
logging.error(f"An unhandled error occurred in run_event_loop: {e}", exc_info=True)
|
||||
|
||||
finally:
|
||||
if self.info and self.info.ws_manager and self.info.ws_manager.is_alive():
|
||||
try:
|
||||
self.info.ws_manager.stop()
|
||||
logging.info("WebSocket connection stopped.")
|
||||
except Exception as e:
|
||||
logging.error(f"Error stopping ws_manager on exit: {e}")
|
||||
|
||||
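
To make the flip handling above ("Short > Long" / "Long > Short") concrete, here is a small worked example of the size arithmetic with made-up numbers:

```python
# Target fill reported as "Short > Long" on ETH:
start_pos_size = -2.0   # target held a 2.0 ETH short before the fill (startPosition)
fill_size = 5.0         # total size of the flipping fill (sz)

close_size = abs(start_pos_size)    # 2.0 -> sent as CLOSE_SHORT
open_size = fill_size - close_size  # 3.0 -> sent as OPEN_LONG

assert (close_size, open_size) == (2.0, 3.0)
```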
30
strategies/ma_cross_strategy.py
Normal file
@ -0,0 +1,30 @@
import pandas as pd
from strategies.base_strategy import BaseStrategy
import logging

class MaCrossStrategy(BaseStrategy):
    """
    A strategy based on a fast Simple Moving Average (SMA) crossing
    a slow SMA.
    """
    # --- FIX: Changed 3rd argument from log_level to trade_signal_queue ---
    def __init__(self, strategy_name: str, params: dict, trade_signal_queue):
        # --- FIX: Passed trade_signal_queue to the parent class ---
        super().__init__(strategy_name, params, trade_signal_queue)
        self.fast_ma_period = self.params.get('short_ma') or self.params.get('fast') or 0
        self.slow_ma_period = self.params.get('long_ma') or self.params.get('slow') or 0

    def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
        if not self.fast_ma_period or not self.slow_ma_period or len(df) < self.slow_ma_period:
            logging.warning(f"Not enough data for MA periods.")
            df['signal'] = 0
            return df

        df['fast_sma'] = df['close'].rolling(window=self.fast_ma_period).mean()
        df['slow_sma'] = df['close'].rolling(window=self.slow_ma_period).mean()

        df['signal'] = 0
        df.loc[df['fast_sma'] > df['slow_sma'], 'signal'] = 1
        df.loc[df['fast_sma'] < df['slow_sma'], 'signal'] = -1

        return df
27
strategies/single_sma_strategy.py
Normal file
@ -0,0 +1,27 @@
import pandas as pd
from strategies.base_strategy import BaseStrategy
import logging

class SingleSmaStrategy(BaseStrategy):
    """
    A strategy based on the price crossing a single Simple Moving Average (SMA).
    """
    # --- FIX: Added trade_signal_queue to the constructor ---
    def __init__(self, strategy_name: str, params: dict, trade_signal_queue):
        # --- FIX: Passed trade_signal_queue to the parent class ---
        super().__init__(strategy_name, params, trade_signal_queue)
        self.sma_period = self.params.get('sma_period', 0)

    def calculate_signals(self, df: pd.DataFrame) -> pd.DataFrame:
        if not self.sma_period or len(df) < self.sma_period:
            logging.warning(f"Not enough data for SMA period {self.sma_period}.")
            df['signal'] = 0
            return df

        df['sma'] = df['close'].rolling(window=self.sma_period).mean()

        df['signal'] = 0
        df.loc[df['close'] > df['sma'], 'signal'] = 1
        df.loc[df['close'] < df['sma'], 'signal'] = -1

        return df
85
strategy_runner.py
Normal file
@ -0,0 +1,85 @@
import argparse
import logging
import sys
import time
import pandas as pd
import sqlite3
import json
import os
from datetime import datetime, timezone
import importlib

from logging_utils import setup_logging
from strategies.base_strategy import BaseStrategy

class StrategyRunner:
    """
    A generic runner that can execute any strategy that adheres to the
    BaseStrategy blueprint. It handles the main logic loop, including data
    loading, signal calculation, status saving, and sleeping.
    """

    def __init__(self, strategy_name: str, log_level: str):
        self.strategy_name = strategy_name
        self.log_level = log_level
        self.config = self._load_strategy_config()
        if not self.config:
            print(f"FATAL: Strategy '{strategy_name}' not found in configuration.")
            sys.exit(1)

        # Dynamically import and instantiate the strategy logic class
        try:
            module_path, class_name = self.config['class'].rsplit('.', 1)
            module = importlib.import_module(module_path)
            StrategyClass = getattr(module, class_name)
            self.strategy_instance = StrategyClass(strategy_name, self.config['parameters'], self.log_level)
        except (ImportError, AttributeError, KeyError) as e:
            print(f"FATAL: Could not load strategy class for '{strategy_name}': {e}")
            sys.exit(1)

    def _load_strategy_config(self) -> dict:
        """Loads the configuration for the specified strategy."""
        config_path = os.path.join("_data", "strategies.json")
        try:
            with open(config_path, 'r') as f:
                all_configs = json.load(f)
            return all_configs.get(self.strategy_name)
        except (FileNotFoundError, json.JSONDecodeError) as e:
            print(f"FATAL: Could not load strategy configuration: {e}")
            return None

    def run(self):
        """Main loop: loads data, calculates signals, saves status, and sleeps."""
        logging.info(f"Starting main logic loop for {self.strategy_instance.coin} on {self.strategy_instance.timeframe}.")
        while True:
            df = self.strategy_instance.load_data()
            if df.empty:
                logging.warning("No data loaded. Waiting 1 minute before retrying...")
                time.sleep(60)
                continue

            # The strategy instance calculates signals and updates its internal state
            self.strategy_instance.calculate_signals_and_state(df.copy())
            self.strategy_instance._save_status()  # Save the new state

            logging.info(f"Current Signal: {self.strategy_instance.current_signal}")

            # Simple 1-minute wait for the next cycle
            # A more precise timing mechanism could be implemented here if needed
            time.sleep(60)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="A generic runner for trading strategies.")
    parser.add_argument("--name", required=True, help="The name of the strategy instance from strategies.json.")
    parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
    args = parser.parse_args()

    try:
        runner = StrategyRunner(strategy_name=args.name, log_level=args.log_level)
        runner.run()
    except KeyboardInterrupt:
        logging.info("Strategy runner stopped.")
    except Exception as e:
        logging.error(f"A critical error occurred in the strategy runner: {e}")
        sys.exit(1)
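
For illustration, an entry in `_data/strategies.json` that the runner above could load might look like the following. The `class` and `parameters` keys are what `StrategyRunner` reads; the strategy name and the specific parameter values are made up for this sketch.

```python
import importlib
import json

# Hypothetical strategies.json content (normally read from _data/strategies.json):
example_config = json.loads("""
{
    "btc_ma_cross_4h": {
        "class": "strategies.ma_cross_strategy.MaCrossStrategy",
        "parameters": {"coin": "BTC", "timeframe": "4h", "short_ma": 20, "long_ma": 50, "agent": "scalper"}
    }
}
""")

cfg = example_config["btc_ma_cross_4h"]
module_path, class_name = cfg["class"].rsplit(".", 1)
StrategyClass = getattr(importlib.import_module(module_path), class_name)  # the same lookup StrategyRunner performs
print(StrategyClass.__name__, cfg["parameters"])
```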
219
strategy_sma_cross.py
Normal file
@ -0,0 +1,219 @@
|
||||
import argparse
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import pandas as pd
|
||||
import sqlite3
|
||||
import json
|
||||
import os
|
||||
from datetime import datetime, timezone, timedelta
|
||||
|
||||
from logging_utils import setup_logging
|
||||
|
||||
class SmaCrossStrategy:
|
||||
"""
|
||||
A flexible strategy that can operate in two modes:
|
||||
1. Fast SMA / Slow SMA Crossover (if both 'fast' and 'slow' params are set)
|
||||
2. Price / Single SMA Crossover (if only one 'fast' or 'slow' param is set)
|
||||
"""
|
||||
|
||||
def __init__(self, strategy_name: str, params: dict, log_level: str):
|
||||
self.strategy_name = strategy_name
|
||||
self.params = params
|
||||
self.coin = params.get("coin", "N/A")
|
||||
self.timeframe = params.get("timeframe", "N/A")
|
||||
|
||||
# Load fast and slow SMA periods, defaulting to 0 if not present
|
||||
self.fast_ma_period = params.get("fast", 0)
|
||||
self.slow_ma_period = params.get("slow", 0)
|
||||
|
||||
self.db_path = os.path.join("_data", "market_data.db")
|
||||
self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")
|
||||
|
||||
# Strategy state variables
|
||||
self.current_signal = "INIT"
|
||||
self.last_signal_change_utc = None
|
||||
self.signal_price = None
|
||||
self.fast_ma_value = None
|
||||
self.slow_ma_value = None
|
||||
|
||||
setup_logging(log_level, f"Strategy-{self.strategy_name}")
|
||||
logging.info(f"Initializing SMA Crossover strategy with parameters:")
|
||||
for key, value in self.params.items():
|
||||
logging.info(f" - {key}: {value}")
|
||||
|
||||
def load_data(self) -> pd.DataFrame:
|
||||
"""Loads historical data, ensuring enough for the longest SMA calculation."""
|
||||
table_name = f"{self.coin}_{self.timeframe}"
|
||||
|
||||
# Determine the longest period needed for calculations
|
||||
longest_period = max(self.fast_ma_period or 0, self.slow_ma_period or 0)
|
||||
if longest_period == 0:
|
||||
logging.error("No valid SMA periods ('fast' or 'slow' > 0) are defined in parameters.")
|
||||
return pd.DataFrame()
|
||||
|
||||
limit = longest_period + 50
|
||||
|
||||
try:
|
||||
with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
|
||||
query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
|
||||
df = pd.read_sql(query, conn)
|
||||
if df.empty: return pd.DataFrame()
|
||||
|
||||
df['datetime_utc'] = pd.to_datetime(df['datetime_utc'])
|
||||
df.set_index('datetime_utc', inplace=True)
|
||||
df.sort_index(inplace=True)
|
||||
return df
|
||||
except Exception as e:
|
||||
logging.error(f"Failed to load data from table '{table_name}': {e}")
|
||||
return pd.DataFrame()
|
||||
|
||||
def _calculate_signals(self, data: pd.DataFrame):
|
||||
"""
|
||||
Analyzes historical data to find the last crossover event based on the
|
||||
configured parameters (either dual or single SMA mode).
|
||||
"""
|
||||
# --- DUAL SMA CROSSOVER LOGIC ---
|
||||
if self.fast_ma_period and self.slow_ma_period:
|
||||
if len(data) < self.slow_ma_period + 1:
|
||||
self.current_signal = "INSUFFICIENT DATA"
|
||||
return
|
||||
|
||||
data['fast_sma'] = data['close'].rolling(window=self.fast_ma_period).mean()
|
||||
data['slow_sma'] = data['close'].rolling(window=self.slow_ma_period).mean()
|
||||
self.fast_ma_value = data['fast_sma'].iloc[-1]
|
||||
self.slow_ma_value = data['slow_sma'].iloc[-1]
|
||||
|
||||
# Position is 1 for Golden Cross (fast > slow), -1 for Death Cross
|
||||
data['position'] = 0
|
||||
data.loc[data['fast_sma'] > data['slow_sma'], 'position'] = 1
|
||||
data.loc[data['fast_sma'] < data['slow_sma'], 'position'] = -1
|
||||
|
||||
# --- SINGLE SMA PRICE CROSS LOGIC ---
|
||||
else:
|
||||
sma_period = self.fast_ma_period or self.slow_ma_period
|
||||
if len(data) < sma_period + 1:
|
||||
self.current_signal = "INSUFFICIENT DATA"
|
||||
return
|
||||
|
||||
data['sma'] = data['close'].rolling(window=sma_period).mean()
|
||||
self.slow_ma_value = data['sma'].iloc[-1] # Use slow_ma_value to store the single SMA
|
||||
self.fast_ma_value = None # Ensure fast is None
|
||||
|
||||
# Position is 1 when price is above SMA, -1 when below
|
||||
data['position'] = 0
|
||||
data.loc[data['close'] > data['sma'], 'position'] = 1
|
||||
data.loc[data['close'] < data['sma'], 'position'] = -1
|
||||
|
||||
# --- COMMON LOGIC for determining signal and last change ---
|
||||
data['crossover'] = data['position'].diff()
|
||||
last_position = data['position'].iloc[-1]
|
||||
|
||||
if last_position == 1: self.current_signal = "BUY"
|
||||
elif last_position == -1: self.current_signal = "SELL"
|
||||
else: self.current_signal = "HOLD"
|
||||
|
||||
last_cross_series = data[data['crossover'] != 0]
|
||||
if not last_cross_series.empty:
|
||||
last_cross_row = last_cross_series.iloc[-1]
|
||||
self.last_signal_change_utc = last_cross_row.name.tz_localize('UTC').isoformat()
|
||||
self.signal_price = last_cross_row['close']
|
||||
if last_cross_row['position'] == 1: self.current_signal = "BUY"
|
||||
elif last_cross_row['position'] == -1: self.current_signal = "SELL"
|
||||
else:
|
||||
self.last_signal_change_utc = data.index[0].tz_localize('UTC').isoformat()
|
||||
self.signal_price = data['close'].iloc[0]
|
||||
|
||||
def _save_status(self):
|
||||
"""Saves the current strategy state to its JSON file."""
|
||||
status = {
|
||||
"strategy_name": self.strategy_name,
|
||||
"current_signal": self.current_signal,
|
||||
"last_signal_change_utc": self.last_signal_change_utc,
|
||||
"signal_price": self.signal_price,
|
||||
"last_checked_utc": datetime.now(timezone.utc).isoformat()
|
||||
}
|
||||
try:
|
||||
with open(self.status_file_path, 'w', encoding='utf-8') as f:
|
||||
json.dump(status, f, indent=4)
|
||||
except IOError as e:
|
||||
logging.error(f"Failed to write status file: {e}")
|
||||
|
||||
def get_sleep_duration(self) -> float:
|
||||
"""Calculates seconds to sleep until the next full candle closes."""
|
||||
tf_value = int(''.join(filter(str.isdigit, self.timeframe)))
|
||||
tf_unit = ''.join(filter(str.isalpha, self.timeframe))
|
||||
|
||||
if tf_unit == 'm': interval_seconds = tf_value * 60
|
||||
elif tf_unit == 'h': interval_seconds = tf_value * 3600
|
||||
elif tf_unit == 'd': interval_seconds = tf_value * 86400
|
||||
else: return 60
|
||||
|
||||
now = datetime.now(timezone.utc)
|
||||
timestamp = now.timestamp()
|
||||
|
||||
next_candle_ts = ((timestamp // interval_seconds) + 1) * interval_seconds
|
||||
sleep_seconds = (next_candle_ts - timestamp) + 5
|
||||
|
||||
logging.info(f"Next candle closes at {datetime.fromtimestamp(next_candle_ts, tz=timezone.utc)}. "
|
||||
f"Sleeping for {sleep_seconds:.2f} seconds.")
|
||||
return sleep_seconds
|
||||
|
||||
def run_logic(self):
|
||||
"""Main loop: loads data, calculates signals, saves status, and sleeps."""
|
||||
logging.info(f"Starting logic loop for {self.coin} on {self.timeframe} timeframe.")
|
||||
while True:
|
||||
data = self.load_data()
|
||||
if data.empty:
|
||||
logging.warning("No data loaded. Waiting 1 minute before retrying...")
|
||||
self.current_signal = "NO DATA"
|
||||
self._save_status()
|
||||
time.sleep(60)
|
||||
continue
|
||||
|
||||
self._calculate_signals(data)
|
||||
self._save_status()
|
||||
|
||||
last_close = data['close'].iloc[-1]
|
||||
|
||||
# --- Log based on which mode the strategy is running in ---
|
||||
if self.fast_ma_period and self.slow_ma_period:
|
||||
fast_ma_str = f"{self.fast_ma_value:.4f}" if self.fast_ma_value is not None else "N/A"
|
||||
slow_ma_str = f"{self.slow_ma_value:.4f}" if self.slow_ma_value is not None else "N/A"
|
||||
logging.info(
|
||||
f"Signal: {self.current_signal} | Price: {last_close:.4f} | "
|
||||
f"Fast SMA({self.fast_ma_period}): {fast_ma_str} | Slow SMA({self.slow_ma_period}): {slow_ma_str}"
|
||||
)
|
||||
else:
|
||||
sma_period = self.fast_ma_period or self.slow_ma_period
|
||||
sma_val_str = f"{self.slow_ma_value:.4f}" if self.slow_ma_value is not None else "N/A"
|
||||
logging.info(
|
||||
f"Signal: {self.current_signal} | Price: {last_close:.4f} | "
|
||||
f"SMA({sma_period}): {sma_val_str}"
|
||||
)
|
||||
|
||||
sleep_time = self.get_sleep_duration()
|
||||
time.sleep(sleep_time)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
parser = argparse.ArgumentParser(description="Run an SMA Crossover trading strategy.")
|
||||
parser.add_argument("--name", required=True, help="The name of the strategy instance from the config.")
|
||||
parser.add_argument("--params", required=True, help="A JSON string of the strategy's parameters.")
|
||||
parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
|
||||
args = parser.parse_args()
|
||||
|
||||
try:
|
||||
strategy_params = json.loads(args.params)
|
||||
strategy = SmaCrossStrategy(
|
||||
strategy_name=args.name,
|
||||
params=strategy_params,
|
||||
log_level=args.log_level
|
||||
)
|
||||
strategy.run_logic()
|
||||
except KeyboardInterrupt:
|
||||
logging.info("Strategy process stopped.")
|
||||
except Exception as e:
|
||||
logging.error(f"A critical error occurred: {e}")
|
||||
sys.exit(1)
|
||||
|
||||
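For reference, a small worked example of the candle-boundary sleep calculation used in get_sleep_duration above; the timeframe and timestamp are illustrative assumptions, not values from the diff:

# Illustrative only: a "15m" timeframe and a current UTC timestamp of exactly 1,000,000,000.0
interval_seconds = 15 * 60                                                  # 900
timestamp = 1_000_000_000.0
next_candle_ts = ((timestamp // interval_seconds) + 1) * interval_seconds  # 1,000,000,800.0
sleep_seconds = (next_candle_ts - timestamp) + 5                           # 800 + 5 second buffer
print(sleep_seconds)                                                        # 805.0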
186 strategy_template.py Normal file
@@ -0,0 +1,186 @@
import argparse
import logging
import sys
import time
import pandas as pd
import sqlite3
import json
import os
from datetime import datetime, timezone, timedelta

from logging_utils import setup_logging


class TradingStrategy:
    """
    A template for a trading strategy that reads data from the SQLite database
    and executes its logic in a loop, running once per candle.
    """

    def __init__(self, strategy_name: str, params: dict, log_level: str):
        self.strategy_name = strategy_name
        self.params = params
        self.coin = params.get("coin", "N/A")
        self.timeframe = params.get("timeframe", "N/A")
        self.db_path = os.path.join("_data", "market_data.db")
        self.status_file_path = os.path.join("_data", f"strategy_status_{self.strategy_name}.json")

        # Strategy state variables
        self.current_signal = "INIT"
        self.last_signal_change_utc = None
        self.signal_price = None
        self.indicator_value = None

        # Load strategy-specific parameters from config
        self.rsi_period = params.get("rsi_period")
        self.short_ma = params.get("short_ma")
        self.long_ma = params.get("long_ma")
        self.sma_period = params.get("sma_period")

        setup_logging(log_level, f"Strategy-{self.strategy_name}")
        logging.info(f"Initializing strategy with parameters: {self.params}")

    def load_data(self) -> pd.DataFrame:
        """Loads historical data, ensuring enough for the longest indicator period."""
        table_name = f"{self.coin}_{self.timeframe}"
        limit = 500
        # Determine required data limit based on the longest configured indicator
        periods = [p for p in [self.sma_period, self.long_ma, self.rsi_period] if p is not None]
        if periods:
            limit = max(periods) + 50

        try:
            with sqlite3.connect(f"file:{self.db_path}?mode=ro", uri=True) as conn:
                query = f'SELECT * FROM "{table_name}" ORDER BY datetime_utc DESC LIMIT {limit}'
                df = pd.read_sql(query, conn)
            if df.empty: return pd.DataFrame()

            df['datetime_utc'] = pd.to_datetime(df['datetime_utc'])
            df.set_index('datetime_utc', inplace=True)
            df.sort_index(inplace=True)
            return df
        except Exception as e:
            logging.error(f"Failed to load data from table '{table_name}': {e}")
            return pd.DataFrame()

    def _calculate_signals(self, data: pd.DataFrame):
        """
        Analyzes historical data to find the last signal crossover event.
        This method should be expanded to handle different strategy types.
        """
        if self.sma_period:
            if len(data) < self.sma_period + 1:
                self.current_signal = "INSUFFICIENT DATA"
                return

            data['sma'] = data['close'].rolling(window=self.sma_period).mean()
            self.indicator_value = data['sma'].iloc[-1]

            data['position'] = 0
            data.loc[data['close'] > data['sma'], 'position'] = 1
            data.loc[data['close'] < data['sma'], 'position'] = -1
            data['crossover'] = data['position'].diff()

            last_position = data['position'].iloc[-1]
            if last_position == 1: self.current_signal = "BUY"
            elif last_position == -1: self.current_signal = "SELL"
            else: self.current_signal = "HOLD"

            last_cross_series = data[data['crossover'] != 0]
            if not last_cross_series.empty:
                last_cross_row = last_cross_series.iloc[-1]
                self.last_signal_change_utc = last_cross_row.name.tz_localize('UTC').isoformat()
                self.signal_price = last_cross_row['close']
                if last_cross_row['position'] == 1: self.current_signal = "BUY"
                elif last_cross_row['position'] == -1: self.current_signal = "SELL"
            else:
                self.last_signal_change_utc = data.index[0].tz_localize('UTC').isoformat()
                self.signal_price = data['close'].iloc[0]

        elif self.rsi_period:
            logging.info(f"RSI logic not implemented for period {self.rsi_period}.")
            self.current_signal = "NOT IMPLEMENTED"

        elif self.short_ma and self.long_ma:
            logging.info(f"MA Cross logic not implemented for {self.short_ma}/{self.long_ma}.")
            self.current_signal = "NOT IMPLEMENTED"

    def _save_status(self):
        """Saves the current strategy state to its JSON file."""
        status = {
            "strategy_name": self.strategy_name,
            "current_signal": self.current_signal,
            "last_signal_change_utc": self.last_signal_change_utc,
            "signal_price": self.signal_price,
            "last_checked_utc": datetime.now(timezone.utc).isoformat()
        }
        try:
            with open(self.status_file_path, 'w', encoding='utf-8') as f:
                json.dump(status, f, indent=4)
        except IOError as e:
            logging.error(f"Failed to write status file: {e}")

    def get_sleep_duration(self) -> int:
        """Calculates seconds to sleep until the next full candle closes."""
        if not self.timeframe: return 60
        tf_value = int(''.join(filter(str.isdigit, self.timeframe)))
        tf_unit = ''.join(filter(str.isalpha, self.timeframe))

        if tf_unit == 'm': interval_seconds = tf_value * 60
        elif tf_unit == 'h': interval_seconds = tf_value * 3600
        elif tf_unit == 'd': interval_seconds = tf_value * 86400
        else: return 60

        now = datetime.now(timezone.utc)
        timestamp = now.timestamp()

        next_candle_ts = ((timestamp // interval_seconds) + 1) * interval_seconds
        sleep_seconds = (next_candle_ts - timestamp) + 5

        logging.info(f"Next candle closes at {datetime.fromtimestamp(next_candle_ts, tz=timezone.utc)}. "
                     f"Sleeping for {sleep_seconds:.2f} seconds.")
        return sleep_seconds

    def run_logic(self):
        """Main loop: loads data, calculates signals, saves status, and sleeps."""
        logging.info(f"Starting main logic loop for {self.coin} on {self.timeframe} timeframe.")
        while True:
            data = self.load_data()
            if data.empty:
                logging.warning("No data loaded. Waiting 1 minute before retrying...")
                self.current_signal = "NO DATA"
                self._save_status()
                time.sleep(60)
                continue

            self._calculate_signals(data)
            self._save_status()

            last_close = data['close'].iloc[-1]
            indicator_val_str = f"{self.indicator_value:.4f}" if self.indicator_value is not None else "N/A"
            logging.info(f"Signal: {self.current_signal} | Price: {last_close:.4f} | Indicator: {indicator_val_str}")

            sleep_time = self.get_sleep_duration()
            time.sleep(sleep_time)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run a trading strategy.")
    parser.add_argument("--name", required=True, help="The name of the strategy instance from the config.")
    parser.add_argument("--params", required=True, help="A JSON string of the strategy's parameters.")
    parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
    args = parser.parse_args()

    try:
        strategy_params = json.loads(args.params)
        strategy = TradingStrategy(
            strategy_name=args.name,
            params=strategy_params,
            log_level=args.log_level
        )
        strategy.run_logic()
    except KeyboardInterrupt:
        logging.info("Strategy process stopped.")
    except Exception as e:
        logging.error(f"A critical error occurred: {e}")
        sys.exit(1)
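The __main__ block above expects the strategy parameters as a single JSON string. A minimal sketch of how a controlling process might launch an instance of this template; the subprocess invocation, the instance name, and the parameter values are illustrative assumptions, not part of the diff:

import json
import subprocess

# Hypothetical parameters; the keys match those read in __init__ above.
params = {"coin": "BTC", "timeframe": "1h", "sma_period": 50}
subprocess.Popen([
    "python", "strategy_template.py",
    "--name", "btc_sma_example",          # hypothetical instance name
    "--params", json.dumps(params),
    "--log-level", "normal",
])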
193 trade_executor.py Normal file
@@ -0,0 +1,193 @@
import argparse
import logging
import os
import sys
import json
import time
# --- REVERTED: Removed math import ---
from datetime import datetime
import multiprocessing

from eth_account import Account
from hyperliquid.exchange import Exchange
from hyperliquid.info import Info
from hyperliquid.utils import constants
from dotenv import load_dotenv

from logging_utils import setup_logging

load_dotenv()


class TradeExecutor:
    """
    Executes orders from a queue and, upon API success,
    updates the shared 'opened_positions.json' state file.
    It is the single source of truth for position state.
    """

    def __init__(self, log_level: str, order_execution_queue: multiprocessing.Queue):
        # Note: Logging is set up by the run_trade_executor function

        self.order_execution_queue = order_execution_queue

        self.vault_address = os.environ.get("MAIN_WALLET_ADDRESS")
        if not self.vault_address:
            logging.error("MAIN_WALLET_ADDRESS not set.")
            sys.exit(1)

        self.info = Info(constants.MAINNET_API_URL, skip_ws=True)
        self.exchanges = self._load_agents()
        if not self.exchanges:
            logging.error("No trading agents found in .env file.")
            sys.exit(1)

        # --- REVERTED: Removed asset_meta loading ---
        # self.asset_meta = self._load_asset_metadata()

        # --- NEW: State management logic ---
        self.opened_positions_file = os.path.join("_data", "opened_positions.json")
        self.opened_positions = self._load_opened_positions()

        logging.info(f"Trade Executor started. Loaded {len(self.opened_positions)} positions.")

    def _load_agents(self) -> dict:
        # ... (omitted for brevity, this logic is correct and unchanged) ...
        exchanges = {}
        logging.info("Discovering agents from environment variables...")
        for env_var, private_key in os.environ.items():
            agent_name = None
            if env_var == "AGENT_PRIVATE_KEY":
                agent_name = "default"
            elif env_var.endswith("_AGENT_PK"):
                agent_name = env_var.replace("_AGENT_PK", "").lower()

            if agent_name and private_key:
                try:
                    agent_account = Account.from_key(private_key)
                    exchanges[agent_name] = Exchange(agent_account, constants.MAINNET_API_URL, account_address=self.vault_address)
                    logging.info(f"Initialized agent '{agent_name}' with address: {agent_account.address}")
                except Exception as e:
                    logging.error(f"Failed to initialize agent '{agent_name}': {e}")
        return exchanges

    # --- REVERTED: Removed asset metadata loading ---
    # def _load_asset_metadata(self) -> dict: ...

    # --- NEW: Position state save/load methods ---
    def _load_opened_positions(self) -> dict:
        """Loads the state of currently managed positions from a JSON file."""
        if not os.path.exists(self.opened_positions_file):
            return {}
        try:
            with open(self.opened_positions_file, 'r', encoding='utf-8') as f:
                return json.load(f)
        except (json.JSONDecodeError, IOError) as e:
            logging.error(f"Failed to read '{self.opened_positions_file}': {e}. Starting with empty state.", exc_info=True)
            return {}

    def _save_opened_positions(self):
        """Saves the current state of managed positions to a JSON file."""
        try:
            with open(self.opened_positions_file, 'w', encoding='utf-8') as f:
                json.dump(self.opened_positions, f, indent=4)
            logging.debug(f"Successfully saved {len(self.opened_positions)} positions to '{self.opened_positions_file}'")
        except IOError as e:
            logging.error(f"Failed to write to '{self.opened_positions_file}': {e}", exc_info=True)

    # --- REVERTED: Removed tick rounding function ---
    # def _round_to_tick(self, price, tick_size): ...

    def run(self):
        """
        Main execution loop. Waits for an order and updates state on success.
        """
        logging.info("Trade Executor started. Waiting for orders...")
        while True:
            try:
                order = self.order_execution_queue.get()
                if not order:
                    continue

                logging.info(f"Received order: {order}")

                agent_name = order['agent']
                action = order['action']
                coin = order['coin']
                is_buy = order['is_buy']
                size = order['size']
                limit_px = order.get('limit_px')

                exchange_to_use = self.exchanges.get(agent_name)
                if not exchange_to_use:
                    logging.error(f"Agent '{agent_name}' not found. Skipping order.")
                    continue

                response = None

                if action == "market_open" or action == "market_close":
                    reduce_only = (action == "market_close")
                    log_action = "MARKET CLOSE" if reduce_only else "MARKET OPEN"
                    logging.warning(f"ACTION: {log_action} {coin} {'BUY' if is_buy else 'SELL'} {size}")

                    # --- REVERTED: Removed all slippage and rounding logic ---
                    # The raw limit_px from the order is now used directly
                    final_price = limit_px
                    logging.info(f"[{agent_name}] Using raw price for {coin}: {final_price}")

                    order_type = {"limit": {"tif": "Ioc"}}
                    # --- REVERTED: Uses final_price (which is just limit_px) ---
                    response = exchange_to_use.order(coin, is_buy, size, final_price, order_type, reduce_only=reduce_only)
                    logging.info(f"Market order response: {response}")

                    # --- NEW: STATE UPDATE ON SUCCESS ---
                    if response.get("status") == "ok":
                        response_data = response.get("response", {}).get("data", {})
                        if response_data and "statuses" in response_data:
                            # Check if the order status contains an error
                            if "error" not in response_data["statuses"][0]:
                                position_key = order['position_key']
                                if action == "market_open":
                                    # Add to state
                                    self.opened_positions[position_key] = {
                                        "strategy": order['strategy'],
                                        "coin": coin,
                                        "side": "long" if is_buy else "short",
                                        "open_time_utc": order['open_time_utc'],
                                        "open_price": order['open_price'],
                                        "amount": order['amount'],
                                        # --- MODIFIED: Read leverage from the order ---
                                        "leverage": order.get('leverage')
                                    }
                                    logging.info(f"Successfully opened position {position_key}. Saving state.")
                                elif action == "market_close":
                                    # Remove from state
                                    if position_key in self.opened_positions:
                                        del self.opened_positions[position_key]
                                        logging.info(f"Successfully closed position {position_key}. Saving state.")
                                    else:
                                        logging.warning(f"Received close confirmation for {position_key}, but it was not in state.")

                                self._save_opened_positions()  # Save state to disk

                            else:
                                logging.error(f"API Error for {action}: {response_data['statuses'][0]['error']}")
                        else:
                            logging.error(f"Unexpected API response format: {response}")
                    else:
                        logging.error(f"API call failed, status: {response.get('status')}")

                elif action == "update_leverage":
                    leverage = int(size)
                    logging.warning(f"ACTION: UPDATE LEVERAGE {coin} to {leverage}x")
                    response = exchange_to_use.update_leverage(leverage, coin)
                    logging.info(f"Update leverage response: {response}")

                else:
                    logging.warning(f"Received unknown action: {action}")

            except Exception as e:
                logging.error(f"An error occurred in the main executor loop: {e}", exc_info=True)
                time.sleep(1)
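For context, the executor above consumes plain dictionaries from order_execution_queue; the keys it reads are visible in run(). A hedged sketch of a market-open message, with all values illustrative rather than taken from the diff:

import multiprocessing

order_execution_queue = multiprocessing.Queue()

order = {
    "agent": "default",                   # must match an agent discovered by _load_agents()
    "action": "market_open",              # or "market_close" / "update_leverage"
    "coin": "BTC",
    "is_buy": True,
    "size": 0.01,
    "limit_px": 65000.0,                  # used as-is; no slippage or tick rounding is applied
    "position_key": "example_btc_long",   # hypothetical state key
    "strategy": "example_strategy",       # hypothetical strategy name
    "open_time_utc": "2024-01-01T00:00:00+00:00",
    "open_price": 65000.0,
    "amount": 0.01,
    "leverage": 5,
}
order_execution_queue.put(order)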
55 trade_log.py Normal file
@@ -0,0 +1,55 @@
import os
import csv
from datetime import datetime, timezone
import threading

# A lock to prevent race conditions when multiple strategies might log at once in the future
log_lock = threading.Lock()


def log_trade(strategy: str, coin: str, action: str, price: float, size: float, signal: str, pnl: float = 0.0):
    """
    Appends a record of a trade action to a persistent CSV log file.

    Args:
        strategy (str): The name of the strategy that triggered the action.
        coin (str): The coin being traded (e.g., 'BTC').
        action (str): The action taken (e.g., 'OPEN_LONG', 'CLOSE_LONG').
        price (float): The execution price of the trade.
        size (float): The size of the trade.
        signal (str): The signal that triggered the trade (e.g., 'BUY', 'SELL').
        pnl (float, optional): The realized profit and loss for closing trades. Defaults to 0.0.
    """
    log_dir = "_logs"
    file_path = os.path.join(log_dir, "trade_history.csv")

    # Ensure the logs directory exists
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)

    # Define the headers for the CSV file
    headers = ["timestamp_utc", "strategy", "coin", "action", "price", "size", "signal", "pnl"]

    # Check if the file needs a header
    file_exists = os.path.isfile(file_path)

    with log_lock:
        try:
            with open(file_path, 'a', newline='', encoding='utf-8') as f:
                writer = csv.DictWriter(f, fieldnames=headers)

                if not file_exists:
                    writer.writeheader()

                writer.writerow({
                    "timestamp_utc": datetime.now(timezone.utc).isoformat(),
                    "strategy": strategy,
                    "coin": coin,
                    "action": action,
                    "price": price,
                    "size": size,
                    "signal": signal,
                    "pnl": pnl
                })
        except IOError as e:
            # If logging fails, print an error to the main console as a fallback.
            print(f"CRITICAL: Failed to write to trade log file: {e}")
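A short usage sketch for log_trade; the argument values are illustrative. Each call appends one row to _logs/trade_history.csv under the headers defined above:

from trade_log import log_trade

log_trade(
    strategy="example_strategy",  # hypothetical strategy name
    coin="BTC",
    action="CLOSE_LONG",
    price=65250.0,
    size=0.01,
    signal="SELL",
    pnl=12.5,
)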
652 wallet_data.py Normal file
@@ -0,0 +1,652 @@
#!/usr/bin/env python3
"""
Hyperliquid Wallet Data Fetcher - FINAL Perfect Alignment
==========================================================
Complete Python script to pull all available data for a Hyperliquid wallet via API.

Requirements:
    pip install hyperliquid-python-sdk

Usage:
    python hyperliquid_wallet_data.py <wallet_address>

Example:
    python hyperliquid_wallet_data.py 0xcd5051944f780a621ee62e39e493c489668acf4d
"""

import sys
import json
from datetime import datetime, timedelta
from typing import Optional, Dict, Any
from hyperliquid.info import Info
from hyperliquid.utils import constants


class HyperliquidWalletAnalyzer:
    """
    Comprehensive wallet data analyzer for Hyperliquid exchange.
    Fetches all available information about a specific wallet address.
    """

    def __init__(self, wallet_address: str, use_testnet: bool = False):
        """
        Initialize the analyzer with a wallet address.

        Args:
            wallet_address: Ethereum-style address (0x...)
            use_testnet: If True, use testnet instead of mainnet
        """
        self.wallet_address = wallet_address
        api_url = constants.TESTNET_API_URL if use_testnet else constants.MAINNET_API_URL

        # Initialize Info API (read-only, no private keys needed)
        self.info = Info(api_url, skip_ws=True)
        print(f"Initialized Hyperliquid API: {'Testnet' if use_testnet else 'Mainnet'}")
        print(f"Target wallet: {wallet_address}\n")

    def print_position_details(self, position: Dict[str, Any], index: int):
        """
        Print detailed information about a single position.

        Args:
            position: Position data dictionary
            index: Position number for display
        """
        pos = position.get('position', {})

        # Extract all position details
        coin = pos.get('coin', 'Unknown')
        size = float(pos.get('szi', 0))
        entry_px = float(pos.get('entryPx', 0))
        position_value = float(pos.get('positionValue', 0))
        unrealized_pnl = float(pos.get('unrealizedPnl', 0))
        return_on_equity = float(pos.get('returnOnEquity', 0))

        # Leverage details
        leverage = pos.get('leverage', {})
        leverage_type = leverage.get('type', 'unknown') if isinstance(leverage, dict) else 'cross'
        leverage_value = leverage.get('value', 0) if isinstance(leverage, dict) else 0

        # Margin and liquidation
        margin_used = float(pos.get('marginUsed', 0))
        liquidation_px = pos.get('liquidationPx')
        max_trade_szs = pos.get('maxTradeSzs', [0, 0])

        # Cumulative funding
        cumulative_funding = float(pos.get('cumFunding', {}).get('allTime', 0))

        # Determine if long or short
        side = "LONG 📈" if size > 0 else "SHORT 📉"
        side_color = "🟢" if size > 0 else "🔴"

        # PnL color
        pnl_symbol = "🟢" if unrealized_pnl >= 0 else "🔴"
        pnl_sign = "+" if unrealized_pnl >= 0 else ""

        # ROE color
        roe_symbol = "🟢" if return_on_equity >= 0 else "🔴"
        roe_sign = "+" if return_on_equity >= 0 else ""

        print(f"\n{'='*80}")
        print(f"POSITION #{index}: {coin} {side} {side_color}")
        print(f"{'='*80}")

        print(f"\n📊 POSITION DETAILS:")
        print(f"   Size: {abs(size):.6f} {coin}")
        print(f"   Side: {side}")
        print(f"   Entry Price: ${entry_px:,.4f}")
        print(f"   Position Value: ${abs(position_value):,.2f}")

        print(f"\n💰 PROFITABILITY:")
        print(f"   Unrealized PnL: {pnl_symbol} {pnl_sign}${unrealized_pnl:,.2f}")
        print(f"   Return on Equity: {roe_symbol} {roe_sign}{return_on_equity:.2%}")
        print(f"   Cumulative Funding: ${cumulative_funding:,.4f}")

        print(f"\n⚙️ LEVERAGE & MARGIN:")
        print(f"   Leverage Type: {leverage_type.upper()}")
        print(f"   Leverage: {leverage_value}x")
        print(f"   Margin Used: ${margin_used:,.2f}")

        print(f"\n⚠️ RISK MANAGEMENT:")
        if liquidation_px:
            liquidation_px_float = float(liquidation_px) if liquidation_px else 0
            print(f"   Liquidation Price: ${liquidation_px_float:,.4f}")

            # Calculate distance to liquidation
            if entry_px > 0 and liquidation_px_float > 0:
                if size > 0:  # Long position
                    distance = ((entry_px - liquidation_px_float) / entry_px) * 100
                else:  # Short position
                    distance = ((liquidation_px_float - entry_px) / entry_px) * 100

                distance_symbol = "🟢" if abs(distance) > 20 else "🟡" if abs(distance) > 10 else "🔴"
                print(f"   Distance to Liq: {distance_symbol} {abs(distance):.2f}%")
        else:
            print(f"   Liquidation Price: N/A (Cross margin)")

        if max_trade_szs and len(max_trade_szs) == 2:
            print(f"   Max Long Trade: {max_trade_szs[0]}")
            print(f"   Max Short Trade: {max_trade_szs[1]}")

        print(f"\n{'='*80}")

    def get_user_state(self) -> Dict[str, Any]:
        """
        Get complete user state including positions and margin summary.

        Returns:
            Dict containing:
            - assetPositions: List of open perpetual positions
            - marginSummary: Account value, margin used, withdrawable
            - crossMarginSummary: Cross margin details
            - withdrawable: Available balance to withdraw
        """
        print("📊 Fetching User State (Perpetuals)...")
        try:
            data = self.info.user_state(self.wallet_address)

            if data:
                margin_summary = data.get('marginSummary', {})
                positions = data.get('assetPositions', [])

                account_value = float(margin_summary.get('accountValue', 0))
                total_margin_used = float(margin_summary.get('totalMarginUsed', 0))
                total_ntl_pos = float(margin_summary.get('totalNtlPos', 0))
                total_raw_usd = float(margin_summary.get('totalRawUsd', 0))
                withdrawable = float(data.get('withdrawable', 0))

                print(f"   ✓ Account Value: ${account_value:,.2f}")
                print(f"   ✓ Total Margin Used: ${total_margin_used:,.2f}")
                print(f"   ✓ Total Position Value: ${total_ntl_pos:,.2f}")
                print(f"   ✓ Withdrawable: ${withdrawable:,.2f}")
                print(f"   ✓ Open Positions: {len(positions)}")

                # Calculate margin utilization
                if account_value > 0:
                    margin_util = (total_margin_used / account_value) * 100
                    util_symbol = "🟢" if margin_util < 50 else "🟡" if margin_util < 75 else "🔴"
                    print(f"   ✓ Margin Utilization: {util_symbol} {margin_util:.2f}%")

                # Print detailed information for each position
                if positions:
                    print(f"\n{'='*80}")
                    print(f"DETAILED POSITION BREAKDOWN ({len(positions)} positions)")
                    print(f"{'='*80}")

                    for idx, position in enumerate(positions, 1):
                        self.print_position_details(position, idx)

                    # Summary table with perfect alignment
                    self.print_positions_summary_table(positions)

                else:
                    print("   ⚠ No perpetual positions found")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return {}

    def print_positions_summary_table(self, positions: list):
        """
        Print a summary table of all positions with perfectly aligned columns.
        NO emojis in data cells - keeps them simple text only for perfect alignment.

        Args:
            positions: List of position dictionaries
        """
        print(f"\n{'='*130}")
        print("POSITIONS SUMMARY TABLE")
        print('='*130)

        # Print header
        print("| Asset    | Side  | Size              | Entry Price       | Position Value    | Unrealized PnL    | ROE        | Leverage   |")
        print("|----------|-------|-------------------|-------------------|-------------------|-------------------|------------|------------|")

        total_position_value = 0
        total_pnl = 0

        for position in positions:
            pos = position.get('position', {})

            coin = pos.get('coin', 'Unknown')
            size = float(pos.get('szi', 0))
            entry_px = float(pos.get('entryPx', 0))
            position_value = float(pos.get('positionValue', 0))
            unrealized_pnl = float(pos.get('unrealizedPnl', 0))
            return_on_equity = float(pos.get('returnOnEquity', 0))

            # Get leverage
            leverage = pos.get('leverage', {})
            leverage_value = leverage.get('value', 0) if isinstance(leverage, dict) else 0
            leverage_type = leverage.get('type', 'cross') if isinstance(leverage, dict) else 'cross'

            # Determine side - NO EMOJIS in data
            side_text = "LONG" if size > 0 else "SHORT"

            # Format PnL and ROE with signs
            pnl_sign = "+" if unrealized_pnl >= 0 else ""
            roe_sign = "+" if return_on_equity >= 0 else ""

            # Accumulate totals
            total_position_value += abs(position_value)
            total_pnl += unrealized_pnl

            # Format all values as strings with proper width
            asset_str = f"{coin[:8]:<8}"
            side_str = f"{side_text:<5}"
            size_str = f"{abs(size):>17,.4f}"
            entry_str = f"${entry_px:>16,.2f}"
            value_str = f"${abs(position_value):>16,.2f}"
            pnl_str = f"{pnl_sign}${unrealized_pnl:>15,.2f}"
            roe_str = f"{roe_sign}{return_on_equity:>9.2%}"
            lev_str = f"{leverage_value}x {leverage_type[:4]}"

            # Print row with exact spacing
            print(f"| {asset_str} | {side_str} | {size_str} | {entry_str} | {value_str} | {pnl_str} | {roe_str} | {lev_str:<10} |")

        # Separator before totals
        print("|==========|=======|===================|===================|===================|===================|============|============|")

        # Total row
        total_value_str = f"${total_position_value:>16,.2f}"
        total_pnl_sign = "+" if total_pnl >= 0 else ""
        total_pnl_str = f"{total_pnl_sign}${total_pnl:>15,.2f}"

        print(f"| TOTAL    |       |                   |                   | {total_value_str} | {total_pnl_str} |            |            |")
        print('='*130 + '\n')

    def get_spot_state(self) -> Dict[str, Any]:
        """
        Get spot trading state including token balances.

        Returns:
            Dict containing:
            - balances: List of spot token holdings
        """
        print("\n💰 Fetching Spot State...")
        try:
            data = self.info.spot_user_state(self.wallet_address)

            if data and data.get('balances'):
                print(f"   ✓ Spot Holdings: {len(data['balances'])} tokens")
                for balance in data['balances'][:5]:  # Show first 5
                    print(f"      - {balance.get('coin', 'Unknown')}: {balance.get('total', 0)}")
            else:
                print("   ⚠ No spot holdings found")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return {}

    def get_open_orders(self) -> list:
        """
        Get all open orders for the user.

        Returns:
            List of open orders with details (price, size, side, etc.)
        """
        print("\n📋 Fetching Open Orders...")
        try:
            data = self.info.open_orders(self.wallet_address)

            if data:
                print(f"   ✓ Open Orders: {len(data)}")
                for order in data[:3]:  # Show first 3
                    coin = order.get('coin', 'Unknown')
                    side = order.get('side', 'Unknown')
                    size = order.get('sz', 0)
                    price = order.get('limitPx', 0)
                    print(f"      - {coin} {side}: {size} @ ${price}")
            else:
                print("   ⚠ No open orders")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_user_fills(self, limit: int = 100) -> list:
        """
        Get recent trade fills (executions).

        Args:
            limit: Maximum number of fills to retrieve (max 2000)

        Returns:
            List of fills with execution details, PnL, timestamps
        """
        print(f"\n📈 Fetching Recent Fills (last {limit})...")
        try:
            data = self.info.user_fills(self.wallet_address)

            if data:
                fills = data[:limit]
                print(f"   ✓ Total Fills Retrieved: {len(fills)}")

                # Show summary stats
                total_pnl = sum(float(f.get('closedPnl', 0)) for f in fills if f.get('closedPnl'))
                print(f"   ✓ Total Closed PnL: ${total_pnl:.2f}")

                # Show most recent
                if fills:
                    recent = fills[0]
                    print(f"   ✓ Most Recent: {recent.get('coin')} {recent.get('side')} {recent.get('sz')} @ ${recent.get('px')}")
            else:
                print("   ⚠ No fills found")

            return data[:limit] if data else []
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_user_fills_by_time(self, start_time: Optional[int] = None,
                               end_time: Optional[int] = None) -> list:
        """
        Get fills within a specific time range.

        Args:
            start_time: Start timestamp in milliseconds (default: 7 days ago)
            end_time: End timestamp in milliseconds (default: now)

        Returns:
            List of fills within the time range
        """
        if not start_time:
            start_time = int((datetime.now() - timedelta(days=7)).timestamp() * 1000)
        if not end_time:
            end_time = int(datetime.now().timestamp() * 1000)

        print(f"\n📅 Fetching Fills by Time Range...")
        print(f"   From: {datetime.fromtimestamp(start_time/1000)}")
        print(f"   To: {datetime.fromtimestamp(end_time/1000)}")

        try:
            data = self.info.user_fills_by_time(self.wallet_address, start_time, end_time)

            if data:
                print(f"   ✓ Fills in Range: {len(data)}")
            else:
                print("   ⚠ No fills in this time range")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_user_fees(self) -> Dict[str, Any]:
        """
        Get user's fee schedule and trading volume.

        Returns:
            Dict containing:
            - feeSchedule: Fee rates by tier
            - userCrossRate: User's current cross trading fee rate
            - userAddRate: User's maker fee rate
            - userWithdrawRate: Withdrawal fee rate
            - dailyUserVlm: Daily trading volume
        """
        print("\n💳 Fetching Fee Information...")
        try:
            data = self.info.user_fees(self.wallet_address)

            if data:
                print(f"   ✓ Maker Fee: {data.get('userAddRate', 0)}%")
                print(f"   ✓ Taker Fee: {data.get('userCrossRate', 0)}%")
                print(f"   ✓ Daily Volume: ${data.get('dailyUserVlm', [0])[0] if data.get('dailyUserVlm') else 0}")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return {}

    def get_user_rate_limit(self) -> Dict[str, Any]:
        """
        Get API rate limit information.

        Returns:
            Dict containing:
            - cumVlm: Cumulative trading volume
            - nRequestsUsed: Number of requests used
            - nRequestsCap: Request capacity
        """
        print("\n⏱️ Fetching Rate Limit Info...")
        try:
            data = self.info.user_rate_limit(self.wallet_address)

            if data:
                used = data.get('nRequestsUsed', 0)
                cap = data.get('nRequestsCap', 0)
                print(f"   ✓ API Requests: {used}/{cap}")
                print(f"   ✓ Cumulative Volume: ${data.get('cumVlm', 0)}")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return {}

    def get_funding_history(self, coin: str, days: int = 7) -> list:
        """
        Get funding rate history for a specific coin.

        Args:
            coin: Asset symbol (e.g., 'BTC', 'ETH')
            days: Number of days of history (default: 7)

        Returns:
            List of funding rate entries
        """
        end_time = int(datetime.now().timestamp() * 1000)
        start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)

        print(f"\n📊 Fetching Funding History for {coin}...")
        try:
            data = self.info.funding_history(coin, start_time, end_time)

            if data:
                print(f"   ✓ Funding Entries: {len(data)}")
                if data:
                    latest = data[-1]
                    print(f"   ✓ Latest Rate: {latest.get('fundingRate', 0)}")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_user_funding_history(self, days: int = 7) -> list:
        """
        Get user's funding payments history.

        Args:
            days: Number of days of history (default: 7)

        Returns:
            List of funding payments
        """
        end_time = int(datetime.now().timestamp() * 1000)
        start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)

        print(f"\n💸 Fetching User Funding Payments (last {days} days)...")
        try:
            data = self.info.user_funding_history(self.wallet_address, start_time, end_time)

            if data:
                print(f"   ✓ Funding Payments: {len(data)}")
                total_funding = sum(float(f.get('usdc', 0)) for f in data)
                print(f"   ✓ Total Funding P&L: ${total_funding:.2f}")
            else:
                print("   ⚠ No funding payments found")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_user_non_funding_ledger_updates(self, days: int = 7) -> list:
        """
        Get non-funding ledger updates (deposits, withdrawals, liquidations).

        Args:
            days: Number of days of history (default: 7)

        Returns:
            List of ledger updates
        """
        end_time = int(datetime.now().timestamp() * 1000)
        start_time = int((datetime.now() - timedelta(days=days)).timestamp() * 1000)

        print(f"\n📒 Fetching Ledger Updates (last {days} days)...")
        try:
            data = self.info.user_non_funding_ledger_updates(self.wallet_address, start_time, end_time)

            if data:
                print(f"   ✓ Ledger Updates: {len(data)}")
                # Categorize updates
                deposits = [u for u in data if 'deposit' in str(u.get('delta', {})).lower()]
                withdrawals = [u for u in data if 'withdraw' in str(u.get('delta', {})).lower()]
                print(f"   ✓ Deposits: {len(deposits)}, Withdrawals: {len(withdrawals)}")
            else:
                print("   ⚠ No ledger updates found")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def get_referral_state(self) -> Dict[str, Any]:
        """
        Get referral program state for the user.

        Returns:
            Dict with referral status and earnings
        """
        print("\n🎁 Fetching Referral State...")
        try:
            data = self.info.query_referral_state(self.wallet_address)

            if data:
                print(f"   ✓ Referral Code: {data.get('referralCode', 'N/A')}")
                print(f"   ✓ Referees: {len(data.get('referees', []))}")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return {}

    def get_sub_accounts(self) -> list:
        """
        Get list of sub-accounts for the user.

        Returns:
            List of sub-account addresses
        """
        print("\n👥 Fetching Sub-Accounts...")
        try:
            data = self.info.query_sub_accounts(self.wallet_address)

            if data:
                print(f"   ✓ Sub-Accounts: {len(data)}")
            else:
                print("   ⚠ No sub-accounts found")

            return data
        except Exception as e:
            print(f"   ✗ Error: {e}")
            return []

    def fetch_all_data(self, save_to_file: bool = True) -> Dict[str, Any]:
        """
        Fetch all available data for the wallet.

        Args:
            save_to_file: If True, save results to JSON file

        Returns:
            Dict containing all fetched data
        """
        print("=" * 80)
        print("HYPERLIQUID WALLET DATA FETCHER")
        print("=" * 80)

        all_data = {
            'wallet_address': self.wallet_address,
            'timestamp': datetime.now().isoformat(),
            'data': {}
        }

        # Fetch all data sections
        all_data['data']['user_state'] = self.get_user_state()
        all_data['data']['spot_state'] = self.get_spot_state()
        all_data['data']['open_orders'] = self.get_open_orders()
        all_data['data']['recent_fills'] = self.get_user_fills(limit=50)
        all_data['data']['fills_last_7_days'] = self.get_user_fills_by_time()
        all_data['data']['user_fees'] = self.get_user_fees()
        all_data['data']['rate_limit'] = self.get_user_rate_limit()
        all_data['data']['funding_payments'] = self.get_user_funding_history(days=7)
        all_data['data']['ledger_updates'] = self.get_user_non_funding_ledger_updates(days=7)
        all_data['data']['referral_state'] = self.get_referral_state()
        all_data['data']['sub_accounts'] = self.get_sub_accounts()

        # Optional: Fetch funding history for positions
        user_state = all_data['data']['user_state']
        if user_state and user_state.get('assetPositions'):
            all_data['data']['funding_history'] = {}
            for position in user_state['assetPositions'][:3]:  # First 3 positions
                coin = position.get('position', {}).get('coin')
                if coin:
                    all_data['data']['funding_history'][coin] = self.get_funding_history(coin, days=7)

        print("\n" + "=" * 80)
        print("DATA COLLECTION COMPLETE")
        print("=" * 80)

        # Save to file
        if save_to_file:
            filename = f"hyperliquid_wallet_data_{self.wallet_address[:10]}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
            with open(filename, 'w') as f:
                json.dump(all_data, f, indent=2, default=str)
            print(f"\n💾 Data saved to: {filename}")

        return all_data


def main():
    """Main execution function."""
    if len(sys.argv) < 2:
        print("Usage: python hyperliquid_wallet_data.py <wallet_address> [--testnet]")
        print("\nExample:")
        print("  python hyperliquid_wallet_data.py 0xcd5051944f780a621ee62e39e493c489668acf4d")
        sys.exit(1)

    wallet_address = sys.argv[1]
    use_testnet = '--testnet' in sys.argv

    # Validate wallet address format
    if not wallet_address.startswith('0x') or len(wallet_address) != 42:
        print("❌ Error: Invalid wallet address format")
        print("   Address must be in format: 0x followed by 40 hexadecimal characters")
        sys.exit(1)

    try:
        analyzer = HyperliquidWalletAnalyzer(wallet_address, use_testnet=use_testnet)
        data = analyzer.fetch_all_data(save_to_file=True)

        print("\n✅ All data fetched successfully!")
        print(f"\n📊 Summary:")
        print(f"   - Account Value: ${data['data']['user_state'].get('marginSummary', {}).get('accountValue', 0)}")
        print(f"   - Open Positions: {len(data['data']['user_state'].get('assetPositions', []))}")
        print(f"   - Spot Holdings: {len(data['data']['spot_state'].get('balances', []))}")
        print(f"   - Open Orders: {len(data['data']['open_orders'])}")
        print(f"   - Recent Fills: {len(data['data']['recent_fills'])}")

    except Exception as e:
        print(f"\n❌ Fatal Error: {e}")
        import traceback
        traceback.print_exc()
        sys.exit(1)


if __name__ == "__main__":
    main()
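Besides the CLI entry point, the analyzer class above can be used programmatically. A minimal sketch, using the example wallet address from the module docstring (the printed field is illustrative):

from wallet_data import HyperliquidWalletAnalyzer

analyzer = HyperliquidWalletAnalyzer(
    "0xcd5051944f780a621ee62e39e493c489668acf4d",
    use_testnet=False,
)
snapshot = analyzer.fetch_all_data(save_to_file=False)
print(len(snapshot["data"]["open_orders"]))  # e.g., count of open orders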
367 whale_tracker.py Normal file
@@ -0,0 +1,367 @@
import json
import os
import time
import requests
import logging
import argparse
import sys
from datetime import datetime, timedelta

# --- Configuration ---
# !! IMPORTANT: Update this to your actual Hyperliquid API endpoint !!
API_ENDPOINT = "https://api.hyperliquid.xyz/info"

INPUT_FILE = os.path.join("_data", "wallets_to_track.json")
OUTPUT_FILE = os.path.join("_data", "wallets_info.json")
LOGS_DIR = "_logs"
LOG_FILE = os.path.join(LOGS_DIR, "whale_tracker.log")

# Polling intervals (in seconds)
POLL_INTERVALS = {
    'core_data': 10,         # 5-15s range
    'open_orders': 20,       # 15-30s range
    'account_metrics': 180,  # 1-5m range
    'ledger_updates': 600,   # 5-15m range
    'save_data': 5,          # How often to write to wallets_info.json
    'reload_wallets': 60     # Check for wallet list changes every 60s
}


class HyperliquidAPI:
    """
    Client to handle POST requests to the Hyperliquid info endpoint.
    """
    def __init__(self, base_url):
        self.base_url = base_url
        self.session = requests.Session()
        logging.info(f"API Client initialized for endpoint: {base_url}")

    def post_request(self, payload):
        """
        Internal helper to send POST requests and handle errors.
        """
        try:
            response = self.session.post(self.base_url, json=payload, timeout=10)
            response.raise_for_status()  # Raise an exception for bad status codes (4xx or 5xx)
            return response.json()
        except requests.exceptions.HTTPError as e:
            logging.error(f"HTTP Error: {e.response.status_code} for {e.request.url}. Response: {e.response.text}")
        except requests.exceptions.ConnectionError as e:
            logging.error(f"Connection Error: {e}")
        except requests.exceptions.Timeout:
            logging.error(f"Request timed out for payload: {payload.get('type')}")
        except json.JSONDecodeError:
            logging.error(f"Failed to decode JSON response. Response text: {response.text if 'response' in locals() else 'No response text'}")
        except Exception as e:
            logging.error(f"An unexpected error occurred in post_request: {e}", exc_info=True)
        return None

    def get_user_state(self, user_address: str):
        payload = {"type": "clearinghouseState", "user": user_address}
        return self.post_request(payload)

    def get_open_orders(self, user_address: str):
        payload = {"type": "openOrders", "user": user_address}
        return self.post_request(payload)

    def get_user_rate_limit(self, user_address: str):
        payload = {"type": "userRateLimit", "user": user_address}
        return self.post_request(payload)

    def get_user_ledger_updates(self, user_address: str, start_time_ms: int, end_time_ms: int):
        payload = {
            "type": "userNonFundingLedgerUpdates",
            "user": user_address,
            "startTime": start_time_ms,
            "endTime": end_time_ms
        }
        return self.post_request(payload)


class WalletTracker:
    """
    Main class to track wallets, process data, and store results.
    """
    def __init__(self, api_client, wallets_to_track):
        self.api = api_client
        self.wallets = wallets_to_track  # This is the list of dicts
        self.wallets_by_name = {w['name']: w for w in self.wallets}
        self.wallets_data = {
            wallet['name']: {"address": wallet['address']} for wallet in self.wallets
        }
        logging.info(f"WalletTracker initialized for {len(self.wallets)} wallets.")

    def reload_wallets(self):
        """
        Checks the INPUT_FILE for changes and updates the tracked wallet list.
        """
        logging.debug("Reloading wallet list...")
        try:
            with open(INPUT_FILE, 'r') as f:
                new_wallets_list = json.load(f)
            if not isinstance(new_wallets_list, list):
                logging.warning(f"Failed to reload '{INPUT_FILE}': content is not a list.")
                return

            new_wallets_by_name = {w['name']: w for w in new_wallets_list}
            old_names = set(self.wallets_by_name.keys())
            new_names = set(new_wallets_by_name.keys())

            added_names = new_names - old_names
            removed_names = old_names - new_names

            if not added_names and not removed_names:
                logging.debug("Wallet list is unchanged.")
                return  # No changes

            # Update internal wallet list
            self.wallets = new_wallets_list
            self.wallets_by_name = new_wallets_by_name

            # Add new wallets to wallets_data
            for name in added_names:
                self.wallets_data[name] = {"address": self.wallets_by_name[name]['address']}
                logging.info(f"Added new wallet to track: {name}")

            # Remove old wallets from wallets_data
            for name in removed_names:
                if name in self.wallets_data:
                    del self.wallets_data[name]
                    logging.info(f"Removed wallet from tracking: {name}")

            logging.info(f"Wallet list reloaded. Tracking {len(self.wallets)} wallets.")

        except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
            logging.error(f"Failed to reload and parse '{INPUT_FILE}': {e}")
        except Exception as e:
            logging.error(f"Unexpected error during wallet reload: {e}", exc_info=True)

    def calculate_core_metrics(self, state_data: dict) -> dict:
        """
        Performs calculations based on user_state data.
        """
        if not state_data or 'crossMarginSummary' not in state_data:
            logging.warning("Core state data is missing 'crossMarginSummary'.")
            return {"raw_state": state_data}

        summary = state_data['crossMarginSummary']
        account_value = float(summary.get('accountValue', 0))
        margin_used = float(summary.get('totalMarginUsed', 0))

        # Calculations
        margin_utilization = (margin_used / account_value) if account_value > 0 else 0
        available_margin = account_value - margin_used

        total_position_value = 0
        if 'assetPositions' in state_data:
            for pos in state_data.get('assetPositions', []):
                try:
                    # Use 'value' for position value
                    pos_value_str = pos.get('position', {}).get('value', '0')
                    total_position_value += float(pos_value_str)
                except (ValueError, TypeError):
                    logging.warning(f"Could not parse position value: {pos.get('position', {}).get('value')}")
                    continue

        portfolio_leverage = (total_position_value / account_value) if account_value > 0 else 0

        # Return calculated metrics alongside raw data
        return {
            "raw_state": state_data,
            "account_value": account_value,
            "margin_used": margin_used,
            "margin_utilization": margin_utilization,
            "available_margin": available_margin,
            "total_position_value": total_position_value,
            "portfolio_leverage": portfolio_leverage
        }

    def poll_core_data(self):
        logging.debug("Polling Core Data...")
        # Use self.wallets which is updated by reload_wallets
        for wallet in self.wallets:
            name = wallet['name']
            address = wallet['address']
            state_data = self.api.get_user_state(address)
            if state_data:
                calculated_data = self.calculate_core_metrics(state_data)
                # Ensure wallet hasn't been removed by a concurrent reload
                if name in self.wallets_data:
                    self.wallets_data[name]['core_state'] = calculated_data
            time.sleep(0.1)  # Avoid bursting requests

    def poll_open_orders(self):
        logging.debug("Polling Open Orders...")
        for wallet in self.wallets:
            name = wallet['name']
            address = wallet['address']
            orders_data = self.api.get_open_orders(address)
            if orders_data:
                # TODO: Add calculations for 'pending_margin_required' if logic is available
                if name in self.wallets_data:
                    self.wallets_data[name]['open_orders'] = {"raw_orders": orders_data}
            time.sleep(0.1)

    def poll_account_metrics(self):
        logging.debug("Polling Account Metrics...")
        for wallet in self.wallets:
            name = wallet['name']
            address = wallet['address']
            metrics_data = self.api.get_user_rate_limit(address)
            if metrics_data:
                if name in self.wallets_data:
                    self.wallets_data[name]['account_metrics'] = metrics_data
            time.sleep(0.1)

    def poll_ledger_updates(self):
        logging.debug("Polling Ledger Updates...")
        end_time_ms = int(datetime.now().timestamp() * 1000)
        start_time_ms = int((datetime.now() - timedelta(minutes=15)).timestamp() * 1000)

        for wallet in self.wallets:
            name = wallet['name']
            address = wallet['address']
            ledger_data = self.api.get_user_ledger_updates(address, start_time_ms, end_time_ms)
            if ledger_data:
                if name in self.wallets_data:
                    self.wallets_data[name]['ledger_updates'] = ledger_data
            time.sleep(0.1)

    def save_data_to_json(self):
        """
        Atomically writes the current wallet data to the output JSON file.
        (No longer needs cleaning logic)
        """
        logging.debug(f"Saving data to {OUTPUT_FILE}...")

        temp_file = OUTPUT_FILE + ".tmp"
        try:
            # Save the data
            with open(temp_file, 'w', encoding='utf-8') as f:
                # self.wallets_data is automatically kept clean by reload_wallets
                json.dump(self.wallets_data, f, indent=2)
            # Atomic rename (move)
            os.replace(temp_file, OUTPUT_FILE)
        except (IOError, json.JSONDecodeError) as e:
            logging.error(f"Failed to write wallet data to file: {e}")
        except Exception as e:
            logging.error(f"An unexpected error occurred during file save: {e}")
            if os.path.exists(temp_file):
                os.remove(temp_file)


class WhaleTrackerRunner:
    """
    Manages the polling loop using last-run timestamps instead of a complex scheduler.
    """
    def __init__(self, api_client, wallets, shared_whale_data_dict=None):  # Kept arg for compatibility
        self.tracker = WalletTracker(api_client, wallets)
        self.last_poll_times = {key: 0 for key in POLL_INTERVALS}
        self.poll_intervals = POLL_INTERVALS
        logging.info("WhaleTrackerRunner initialized to save to JSON file.")

    def update_shared_data(self):
        """
        This function is no longer called by the run loop.
        It's kept here to prevent errors if imported elsewhere, but is now unused.
        """
        logging.debug("No shared dict, saving data to JSON file.")
        self.tracker.save_data_to_json()

    def run(self):
        logging.info("Starting main polling loop...")
        while True:
            try:
                now = time.time()

                if now - self.last_poll_times['reload_wallets'] > self.poll_intervals['reload_wallets']:
                    self.tracker.reload_wallets()
                    self.last_poll_times['reload_wallets'] = now

                if now - self.last_poll_times['core_data'] > self.poll_intervals['core_data']:
                    self.tracker.poll_core_data()
                    self.last_poll_times['core_data'] = now

                if now - self.last_poll_times['open_orders'] > self.poll_intervals['open_orders']:
                    self.tracker.poll_open_orders()
                    self.last_poll_times['open_orders'] = now

                if now - self.last_poll_times['account_metrics'] > self.poll_intervals['account_metrics']:
                    self.tracker.poll_account_metrics()
                    self.last_poll_times['account_metrics'] = now

                if now - self.last_poll_times['ledger_updates'] > self.poll_intervals['ledger_updates']:
                    self.tracker.poll_ledger_updates()
                    self.last_poll_times['ledger_updates'] = now

                if now - self.last_poll_times['save_data'] > self.poll_intervals['save_data']:
                    self.tracker.save_data_to_json()  # <-- NEW
                    self.last_poll_times['save_data'] = now

                # Sleep for a short duration to prevent busy-waiting
                time.sleep(1)

            except Exception as e:
                logging.critical(f"Unhandled exception in main loop: {e}", exc_info=True)
                time.sleep(10)


def setup_logging(log_level_str: str, process_name: str):
    """Configures logging for the script."""
    if not os.path.exists(LOGS_DIR):
        try:
            os.makedirs(LOGS_DIR)
        except OSError as e:
            print(f"Failed to create logs directory {LOGS_DIR}: {e}")
            return

    level_map = {
        'debug': logging.DEBUG,
        'normal': logging.INFO,
        'off': logging.NOTSET
    }
    log_level = level_map.get(log_level_str.lower(), logging.INFO)

    if log_level == logging.NOTSET:
        return

    handlers_list = [logging.FileHandler(LOG_FILE, mode='a')]

    if sys.stdout.isatty():
        handlers_list.append(logging.StreamHandler(sys.stdout))

    logging.basicConfig(
        level=log_level,
        format=f"%(asctime)s.%(msecs)03d | {process_name:<20} | %(levelname)-8s | %(message)s",
        datefmt='%Y-%m-%d %H:%M:%S',
        handlers=handlers_list
    )


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Hyperliquid Whale Tracker")
    parser.add_argument("--log-level", default="normal", choices=['off', 'normal', 'debug'])
    args = parser.parse_args()

    setup_logging(args.log_level, "WhaleTracker")

    # Load wallets to track
    wallets_to_track = []
    try:
        with open(INPUT_FILE, 'r') as f:
            wallets_to_track = json.load(f)
        if not isinstance(wallets_to_track, list) or not wallets_to_track:
            raise ValueError(f"'{INPUT_FILE}' is empty or not a list.")
    except (FileNotFoundError, json.JSONDecodeError, ValueError) as e:
        logging.critical(f"Failed to load '{INPUT_FILE}': {e}. Exiting.")
        sys.exit(1)

    # Initialize API client
    api_client = HyperliquidAPI(base_url=API_ENDPOINT)

    # Initialize and run the tracker
    runner = WhaleTrackerRunner(api_client, wallets_to_track, shared_whale_data_dict=None)

    try:
        runner.run()
    except KeyboardInterrupt:
        logging.info("Whale Tracker shutting down.")
        sys.exit(0)
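The tracker above expects _data/wallets_to_track.json to be a list of objects with 'name' and 'address' keys, as read by WalletTracker and reload_wallets. A minimal sketch that writes such a file; the names and addresses are illustrative placeholders:

import json
import os

wallets = [
    {"name": "whale_1", "address": "0x0000000000000000000000000000000000000001"},
    {"name": "whale_2", "address": "0x0000000000000000000000000000000000000002"},
]
os.makedirs("_data", exist_ok=True)
with open(os.path.join("_data", "wallets_to_track.json"), "w", encoding="utf-8") as f:
    json.dump(wallets, f, indent=2)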