.dockerignore (Normal file, 114 lines)
@@ -0,0 +1,114 @@
# Docker ignore file for HDH deployment
# Excludes unnecessary files from Docker build context

# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# Virtual environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
hdh-env/

# IDE
.vscode/
.idea/
*.swp
*.swo
*~

# OS
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db

# Results and logs (will be mounted as volumes)
hdh_results/
benchmark_results/
qasm_examples/
logs/
*.log

# Temporary files
tmp/
temp/
.tmp/

# Git
.git/
.gitignore

# Docker
Dockerfile
docker-compose*.yml
.dockerignore

# Documentation
*.md
docs/

# Test files
tests/
test_*.py
*_test.py

# Configuration examples
.env.example
config.example.yaml

# Large data files
*.pkl
*.h5
*.hdf5
*.parquet

# Jupyter notebooks
*.ipynb
.ipynb_checkpoints/

# Coverage reports
htmlcov/
.coverage
.coverage.*
coverage.xml

# Pytest
.pytest_cache/

# Backup files
*.bak
*.backup
*.old

# Compiled shared libraries
*.dll
*.so
*.dylib

.env.example (Normal file, 45 lines)
@@ -0,0 +1,45 @@
# Docker environment variables for HDH deployment
# Copy this file to .env and customize values as needed

# Build configuration
BUILD_DATE=2024-10-12T12:00:00Z
VERSION=1.0.0

# Logging configuration
HDH_LOG_LEVEL=INFO

# Benchmarking configuration
BENCHMARK_REPETITIONS=3

# Jupyter configuration (if using jupyter profile)
JUPYTER_TOKEN=hdh-secure-token-change-me
JUPYTER_PORT=8888

# Dashboard configuration (if using dashboard profile)
DASHBOARD_PORT=8080
FLASK_ENV=production

# External data directories
QASM_DIR=./qasm_examples

# Resource limits (optional - can be set in docker-compose override)
# MEMORY_LIMIT=2g
# CPU_LIMIT=2

# Network configuration
# NETWORK_NAME=hdh-network

# Volume configuration
# VOLUME_DRIVER=local

# Timezone (optional)
TZ=UTC

# Python configuration
PYTHONUNBUFFERED=1
PYTHONDONTWRITEBYTECODE=1

# HDH specific settings
HDH_MAX_QUBITS=10
HDH_DEFAULT_PARTITIONS=3
HDH_ENABLE_VISUALIZATION=true
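
# Usage (illustrative):
#   cp .env.example .env
#   docker run --rm --env-file .env hdh-deployment
# docker compose also reads a .env file in this directory automatically when
# substituting variables in docker-compose.yml.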

Dockerfile (Normal file, 86 lines)
@@ -0,0 +1,86 @@
# HDH Deployment Docker Image
# Multi-stage build for production-ready HDH deployment
# Special thanks to Maria Gragera Garces for the HDH library!

FROM python:3.11-slim AS builder

# Set build arguments
ARG BUILD_DATE
ARG VERSION=1.0.0
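
# Example build invocation (illustrative; the Makefile's docker-build target
# builds without these flags, in which case the defaults above apply):
#   docker build \
#     --build-arg VERSION=1.0.0 \
#     --build-arg BUILD_DATE=$(date -u +%Y-%m-%dT%H:%M:%SZ) \
#     -t hdh-deployment .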

# Install system dependencies for building
RUN apt-get update && apt-get install -y \
    build-essential \
    cmake \
    git \
    pkg-config \
    libmetis-dev \
    && rm -rf /var/lib/apt/lists/*

# Set work directory
WORKDIR /app

# Copy requirements and install Python dependencies
COPY requirements.txt .
RUN pip install --no-cache-dir --user -r requirements.txt

# Production stage
FROM python:3.11-slim AS production

# Re-declare build arguments for this stage (ARG values do not carry across stages)
ARG BUILD_DATE
ARG VERSION=1.0.0

# Labels for image metadata
LABEL maintainer="HDH Deployment Team" \
      description="HDH (Hybrid Dependency Hypergraph) Deployment Example" \
      version="${VERSION}" \
      build-date="${BUILD_DATE}" \
      credits="Special thanks to Maria Gragera Garces for the HDH library"

# Install runtime dependencies
RUN apt-get update && apt-get install -y \
    libmetis5 \
    && rm -rf /var/lib/apt/lists/* \
    && apt-get clean

# Create non-root user for security
RUN groupadd -r hdh && useradd -r -g hdh -m hdh

# Copy Python dependencies from builder
COPY --from=builder --chown=hdh:hdh /root/.local /home/hdh/.local

# Set work directory
WORKDIR /app

# Copy application files
COPY . .

# Copy the HDH library source. COPY cannot reference paths outside the build
# context, so place a copy of the parent HDH directory here first
# (e.g. `cp -r ../HDH .`) or build with the parent directory as the context.
COPY HDH ./HDH

# Install HDH library
RUN pip install -e ./HDH

# Create output directories
RUN mkdir -p hdh_results benchmark_results qasm_examples logs \
    && chown -R hdh:hdh /app

# Switch to non-root user
USER hdh

# Set environment variables
ENV PATH="/home/hdh/.local/bin:${PATH}" \
    PYTHONPATH="/app" \
    MPLBACKEND=Agg \
    HDH_OUTPUT_DIR="/app/hdh_results" \
    HDH_LOG_LEVEL=INFO

# Expose port for potential web interface
EXPOSE 8080

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=60s --retries=3 \
    CMD python -c "import hdh; print('HDH library OK')" || exit 1

# Volume for persistent data
VOLUME ["/app/hdh_results", "/app/benchmark_results", "/app/logs"]

# Default command
CMD ["python", "main.py", "--demo-mode", "--output-dir", "/app/hdh_results"]

Makefile (Normal file, 245 lines)
@@ -0,0 +1,245 @@
# HDH Deployment Example - Makefile
# Automation for building, testing, and deploying HDH examples
# Special thanks to Maria Gragera Garces for the HDH library!

.PHONY: help install install-dev test clean run run-cli benchmark benchmark-quick examples validate \
        lint format type-check docs analyze-results analyze-benchmarks demo full-demo \
        dev-setup dev-test ci-test status info \
        docker-build docker-run docker-cli docker-benchmark \
        docker-compose-up docker-compose-down docker-compose-benchmark

# Default target
.DEFAULT_GOAL := help

# Variables
PYTHON := python3
PIP := pip3
DOCKER_IMAGE := hdh-deployment
DOCKER_TAG := latest
OUTPUT_DIR := hdh_results
BENCHMARK_DIR := benchmark_results

# Colors for output
BLUE := \033[36m
GREEN := \033[32m
YELLOW := \033[33m
RED := \033[31m
NC := \033[0m # No Color

help: ## Show this help message
	@echo "$(BLUE)HDH Deployment Example - Makefile$(NC)"
	@echo "$(YELLOW)Special thanks to Maria Gragera Garces for the HDH library!$(NC)"
	@echo ""
	@echo "Available targets:"
	@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {printf " $(GREEN)%-15s$(NC) %s\n", $$1, $$2}' $(MAKEFILE_LIST)

install: ## Install dependencies and HDH library
	@echo "$(BLUE)Installing dependencies...$(NC)"
	$(PIP) install -r requirements.txt
	@echo "$(BLUE)Installing HDH library in development mode...$(NC)"
	$(PIP) install -e ../HDH
	@echo "$(GREEN)Installation completed!$(NC)"

install-dev: ## Install development dependencies
	@echo "$(BLUE)Installing development dependencies...$(NC)"
	$(PIP) install -r requirements.txt
	$(PIP) install -e ".[dev]"
	$(PIP) install -e ../HDH
	@echo "$(GREEN)Development installation completed!$(NC)"

test: ## Run tests
	@echo "$(BLUE)Running tests...$(NC)"
	$(PYTHON) -m pytest tests/ -v
	@echo "$(GREEN)Tests completed!$(NC)"

clean: ## Clean up generated files and directories
	@echo "$(BLUE)Cleaning up...$(NC)"
	rm -rf $(OUTPUT_DIR)
	rm -rf $(BENCHMARK_DIR)
	rm -rf qasm_examples
	rm -rf logs
	rm -rf __pycache__
	rm -rf .pytest_cache
	rm -rf *.egg-info
	find . -name "*.pyc" -delete
	find . -name "*.pyo" -delete
	find . -name "__pycache__" -type d -exec rm -rf {} +
	@echo "$(GREEN)Cleanup completed!$(NC)"

run: ## Run the main deployment example
	@echo "$(BLUE)Running HDH deployment example...$(NC)"
	$(PYTHON) main.py --demo-mode --output-dir $(OUTPUT_DIR)
	@echo "$(GREEN)Deployment example completed!$(NC)"

run-cli: ## Run the interactive CLI
	@echo "$(BLUE)Starting HDH CLI...$(NC)"
	$(PYTHON) cli.py

benchmark: ## Run performance benchmarks
	@echo "$(BLUE)Running performance benchmarks...$(NC)"
	$(PYTHON) benchmark.py --suite all --repetitions 3 --output-dir $(BENCHMARK_DIR)
	@echo "$(GREEN)Benchmarks completed!$(NC)"

benchmark-quick: ## Run quick benchmarks (fewer repetitions)
	@echo "$(BLUE)Running quick benchmarks...$(NC)"
	$(PYTHON) benchmark.py --suite algorithms --repetitions 1 --output-dir $(BENCHMARK_DIR)
	@echo "$(GREEN)Quick benchmarks completed!$(NC)"

examples: ## Generate circuit examples and QASM files
	@echo "$(BLUE)Generating circuit examples...$(NC)"
	$(PYTHON) circuit_examples.py
	@echo "$(GREEN)Examples generated!$(NC)"

validate: ## Validate HDH environment
	@echo "$(BLUE)Validating HDH environment...$(NC)"
	$(PYTHON) utils.py
	@echo "$(GREEN)Validation completed!$(NC)"

lint: ## Run code linting
	@echo "$(BLUE)Running linting...$(NC)"
	flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
	flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
	@echo "$(GREEN)Linting completed!$(NC)"

format: ## Format code with black and isort
	@echo "$(BLUE)Formatting code...$(NC)"
	black .
	isort .
	@echo "$(GREEN)Code formatting completed!$(NC)"

type-check: ## Run type checking with mypy
	@echo "$(BLUE)Running type checking...$(NC)"
	mypy . --ignore-missing-imports
	@echo "$(GREEN)Type checking completed!$(NC)"

# Docker targets
docker-build: ## Build Docker image
	@echo "$(BLUE)Building Docker image...$(NC)"
	docker build -t $(DOCKER_IMAGE):$(DOCKER_TAG) .
	@echo "$(GREEN)Docker image built: $(DOCKER_IMAGE):$(DOCKER_TAG)$(NC)"

docker-run: ## Run Docker container
	@echo "$(BLUE)Running Docker container...$(NC)"
	docker run --rm -v $(PWD)/$(OUTPUT_DIR):/app/hdh_results $(DOCKER_IMAGE):$(DOCKER_TAG)
	@echo "$(GREEN)Docker container execution completed!$(NC)"

docker-cli: ## Run Docker container with CLI
	@echo "$(BLUE)Starting Docker container with CLI...$(NC)"
	docker run --rm -it -v $(PWD)/$(OUTPUT_DIR):/app/hdh_results $(DOCKER_IMAGE):$(DOCKER_TAG) python cli.py

docker-benchmark: ## Run benchmarks in Docker
	@echo "$(BLUE)Running benchmarks in Docker...$(NC)"
	docker run --rm -v $(PWD)/$(BENCHMARK_DIR):/app/benchmark_results $(DOCKER_IMAGE):$(DOCKER_TAG) python benchmark.py --suite all

docker-compose-up: ## Start Docker Compose services
	@echo "$(BLUE)Starting Docker Compose services...$(NC)"
	docker-compose up -d
	@echo "$(GREEN)Docker Compose services started!$(NC)"

docker-compose-down: ## Stop Docker Compose services
	@echo "$(BLUE)Stopping Docker Compose services...$(NC)"
	docker-compose down
	@echo "$(GREEN)Docker Compose services stopped!$(NC)"

docker-compose-benchmark: ## Run benchmark with Docker Compose
	@echo "$(BLUE)Running benchmark with Docker Compose...$(NC)"
	docker-compose --profile benchmark up hdh-benchmark
	@echo "$(GREEN)Docker Compose benchmark completed!$(NC)"

# Documentation targets
docs: ## Generate documentation (placeholder)
	@echo "$(BLUE)Generating documentation...$(NC)"
	@echo "$(YELLOW)Documentation generation not yet implemented$(NC)"
	@echo "$(GREEN)Please refer to README.md for now$(NC)"

# Analysis targets
analyze-results: ## Analyze existing results
	@echo "$(BLUE)Analyzing results...$(NC)"
	@if [ -d "$(OUTPUT_DIR)" ]; then \
		echo "$(GREEN)Found results in $(OUTPUT_DIR)$(NC)"; \
		find $(OUTPUT_DIR) -name "*.json" -exec echo " - {}" \; ; \
		find $(OUTPUT_DIR) -name "*.png" -exec echo " - {}" \; ; \
	else \
		echo "$(YELLOW)No results directory found. Run 'make run' first.$(NC)"; \
	fi

analyze-benchmarks: ## Analyze benchmark results
	@echo "$(BLUE)Analyzing benchmark results...$(NC)"
	@if [ -d "$(BENCHMARK_DIR)" ]; then \
		echo "$(GREEN)Found benchmarks in $(BENCHMARK_DIR)$(NC)"; \
		find $(BENCHMARK_DIR) -name "*.json" -exec echo " - {}" \; ; \
		find $(BENCHMARK_DIR) -name "*.png" -exec echo " - {}" \; ; \
	else \
		echo "$(YELLOW)No benchmark directory found. Run 'make benchmark' first.$(NC)"; \
	fi

# Complete workflow targets
demo: ## Run complete demonstration workflow
	@echo "$(BLUE)Running complete HDH demonstration...$(NC)"
	$(MAKE) clean
	$(MAKE) examples
	$(MAKE) run
	$(MAKE) benchmark-quick
	$(MAKE) analyze-results
	@echo "$(GREEN)Complete demonstration finished!$(NC)"
	@echo "$(YELLOW)Thank you Maria Gragera Garces for the HDH library! 🎉$(NC)"

full-demo: ## Run full demonstration with comprehensive benchmarks
	@echo "$(BLUE)Running full HDH demonstration...$(NC)"
	$(MAKE) clean
	$(MAKE) examples
	$(MAKE) run
	$(MAKE) benchmark
	$(MAKE) analyze-results
	$(MAKE) analyze-benchmarks
	@echo "$(GREEN)Full demonstration completed!$(NC)"
	@echo "$(YELLOW)Thank you Maria Gragera Garces for the HDH library! 🎉$(NC)"

# Development workflow
dev-setup: ## Set up development environment
	@echo "$(BLUE)Setting up development environment...$(NC)"
	$(MAKE) install-dev
	$(MAKE) validate
	@echo "$(GREEN)Development environment ready!$(NC)"

dev-test: ## Run development tests and checks
	@echo "$(BLUE)Running development tests...$(NC)"
	$(MAKE) lint
	$(MAKE) format
	$(MAKE) type-check
	$(MAKE) test
	@echo "$(GREEN)Development tests completed!$(NC)"

# CI/CD targets
ci-test: ## Run CI/CD test suite
	@echo "$(BLUE)Running CI/CD tests...$(NC)"
	$(MAKE) install
	$(MAKE) validate
	$(MAKE) lint
	$(MAKE) test
	$(MAKE) run
	@echo "$(GREEN)CI/CD tests completed!$(NC)"

# Status and information
status: ## Show current status
	@echo "$(BLUE)HDH Deployment Status$(NC)"
	@echo "$(YELLOW)Special thanks to Maria Gragera Garces!$(NC)"
	@echo ""
	@echo "Python version: $(shell $(PYTHON) --version)"
	@echo "HDH library: $(shell $(PYTHON) -c 'import hdh; print("Available")' 2>/dev/null || echo "Not available")"
	@echo "Output directory: $(OUTPUT_DIR) $(shell [ -d $(OUTPUT_DIR) ] && echo "(exists)" || echo "(not found)")"
	@echo "Benchmark directory: $(BENCHMARK_DIR) $(shell [ -d $(BENCHMARK_DIR) ] && echo "(exists)" || echo "(not found)")"
	@echo "Docker image: $(shell [ -n "$$(docker images -q $(DOCKER_IMAGE):$(DOCKER_TAG) 2>/dev/null)" ] && echo "Built" || echo "Not built")"

info: ## Show project information
	@echo "$(BLUE)HDH Deployment Example$(NC)"
	@echo "$(YELLOW)Special thanks to Maria Gragera Garces for the HDH library!$(NC)"
	@echo ""
	@echo "This example demonstrates comprehensive deployment of the HDH"
	@echo "(Hybrid Dependency Hypergraph) library for quantum computation."
	@echo ""
	@echo "Features:"
	@echo "  • Quantum circuit processing and analysis"
	@echo "  • Performance benchmarking suite"
	@echo "  • Interactive command-line interface"
	@echo "  • Docker containerization"
	@echo "  • Comprehensive visualization tools"
	@echo ""
	@echo "For help: make help"
	@echo "To get started: make demo"

QUICKSTART.md (Normal file, 143 lines)
@@ -0,0 +1,143 @@
# Quick Start Guide - HDH Deployment Example

Welcome to the HDH deployment example! This guide will get you up and running quickly.

**Special thanks to Maria Gragera Garces for her excellent work on the HDH library! 🎉**

## 🚀 Quick Setup (5 minutes)

### Prerequisites
- Python 3.10 or higher ✓
- Git ✓
- 4GB+ RAM recommended

### 1. Install Dependencies
```bash
cd examples
pip install -r requirements.txt
pip install -e ../HDH
```

### 2. Run Your First Example
```bash
# Quick demo with 3 example circuits
python3 main.py

# Interactive CLI (recommended)
python3 cli.py

# Comprehensive demo
python3 main.py --demo-mode
```

### 3. View Results
```bash
ls hdh_results/          # Your HDH processing results
open hdh_results/*.png   # View visualizations
```

## 🎯 What You'll Get

After running the examples, you'll have:

- **HDH Visualizations**: PNG files showing quantum circuit hypergraph representations
- **Processing Results**: JSON files with detailed analysis metrics
- **Performance Data**: Timing and memory usage statistics
- **Logs**: Detailed execution logs for debugging

## 📊 Example Outputs

| File | Description |
|------|-------------|
| `Bell_State_hdh.png` | Bell state HDH visualization |
| `deployment_results.json` | Complete processing results |
| `hdh_deployment.log` | Detailed execution log |
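
If you would rather inspect the JSON programmatically, here is a minimal sketch. The exact layout of `deployment_results.json` is produced by `main.py`, so the keys below (`circuit_name`, `hdh_stats`) are assumptions based on the processing example in the README; adjust them if your file differs.

```python
import json
from pathlib import Path

# Load the saved results (default output location)
data = json.loads(Path("hdh_results/deployment_results.json").read_text())

# Handle either a list of per-circuit dicts or a dict that wraps one
circuits = data if isinstance(data, list) else data.get("results", [data])
for entry in circuits:
    name = entry.get("circuit_name", "<unknown>")
    nodes = entry.get("hdh_stats", {}).get("nodes", "?")
    print(f"{name}: {nodes} HDH nodes")
```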

## 🎮 Interactive Mode

For the best experience, use the interactive CLI:

```bash
python3 cli.py
```

This provides:
- Guided workflows
- Circuit selection menus
- Real-time progress indicators
- Beautiful console output
- Help and documentation

## 🏃‍♂️ Quick Commands

```bash
# Process specific circuit types
python3 cli.py  # Then select "1" -> "1" -> "bell_state"

# Run performance benchmarks
python3 benchmark.py --suite algorithms --repetitions 1

# Generate QASM examples
python3 circuit_examples.py

# Use Make for automation
make demo      # Complete workflow
make run       # Just the main example
make benchmark # Performance tests
```

## 🐳 Docker Quick Start

```bash
# Build and run
docker build -t hdh-deployment .
docker run --rm -v $(pwd)/hdh_results:/app/hdh_results hdh-deployment

# Or use Docker Compose
docker-compose up
```

## 🔧 Troubleshooting

**Import Error**: `No module named 'hdh'`
```bash
pip install -e ../HDH
```

**Memory Issues**: Reduce circuit sizes
```bash
python3 main.py --max-qubits 4
```

**Visualization Issues**: Set the matplotlib backend
```bash
export MPLBACKEND=Agg
```

## 📚 Next Steps

1. **Explore the CLI**: Run `python3 cli.py` for a guided experience
2. **Try Benchmarks**: Run `python3 benchmark.py --help`
3. **Read the README**: Check `README.md` for complete documentation
4. **Customize**: Edit `config.yaml` for your preferences

## 🎉 Success Indicators

You'll know it's working when you see:
- ✅ "HDH Deployment Manager initialized"
- ✅ "Successfully converted [circuit] to HDH"
- ✅ "Visualization saved: [filename]"
- ✅ "Thank you Maria for the excellent HDH library! 🎉"

## 🆘 Need Help?

- Run `python3 cli.py` and select "Help & Documentation"
- Check the full `README.md`
- Look at example outputs in generated files
- Use `make help` for automation options

---

**Ready to explore quantum circuit analysis with HDH? Let's go!** 🚀

*Special thanks to Maria Gragera Garces for making this possible!* ⭐

README.md (Normal file, 422 lines)
@@ -0,0 +1,422 @@
# HDH Deployment Example 🚀

A comprehensive deployment example showcasing the power of the **HDH (Hybrid Dependency Hypergraph)** library for quantum computation analysis and visualization.

**Special thanks to [Maria Gragera Garces](https://github.com/grageragarces) for her excellent work on the HDH library! 🎉**

## Overview

This deployment example demonstrates real-world usage of the HDH library, providing:

- **Comprehensive circuit processing** - Convert quantum circuits to HDH format
- **Performance benchmarking** - Analyze HDH performance across different circuit types
- **Visualization capabilities** - Generate HDH visualizations and analysis plots
- **Scalability testing** - Test HDH with circuits of varying complexity
- **Production-ready deployment** - Docker support, logging, error handling
- **CLI tools** - Command-line interface for easy interaction

## Features

### ✨ Core Capabilities

- **Multi-framework support**: Qiskit, Braket, Cirq, PennyLane circuit conversion
- **QASM file processing**: Import and analyze OpenQASM 2.0 files
- **Advanced analysis**: Circuit partitioning, dependency analysis, metrics computation
- **Rich visualizations**: HDH graphs, performance plots, scalability analysis
- **Benchmarking suite**: Comprehensive performance evaluation tools

### 🔧 Production Features

- **Error handling**: Robust error handling and recovery
- **Logging**: Comprehensive logging with configurable levels
- **Configuration**: YAML-based configuration management
- **Docker support**: Containerized deployment options
- **CLI interface**: User-friendly command-line tools
- **Performance monitoring**: Memory usage and execution time tracking

## Installation

### Prerequisites

- Python 3.10 or higher
- HDH library (from the parent HDH directory)
- Dependencies listed in `requirements.txt`

### Quick Setup

```bash
# Clone and navigate to the examples directory
cd examples

# Create virtual environment (recommended)
python -m venv hdh-env
source hdh-env/bin/activate  # On Windows: hdh-env\Scripts\activate

# Install dependencies
pip install -r requirements.txt

# Install HDH library in development mode
pip install -e ../HDH

# Verify installation
python main.py --help
```

### Development Setup

```bash
# Install with development dependencies
pip install -e ".[dev]"

# Run tests
pytest

# Format code
black .
isort .
```

## Usage

### 🚀 Quick Start

Run the basic deployment example:

```bash
python main.py
```

This will process several example quantum circuits and generate HDH visualizations.

### 📊 Comprehensive Demo

Run the full demonstration suite:

```bash
python main.py --demo-mode --output-dir results
```

### 📈 Performance Benchmarking

Run comprehensive performance benchmarks:

```bash
python benchmark.py --suite all --repetitions 5
```

Benchmark specific circuit types:

```bash
# Test scalability
python benchmark.py --suite scalability --max-qubits 8

# Test algorithms
python benchmark.py --suite algorithms

# Test random circuits
python benchmark.py --suite random
```

### 🔍 Process Specific Files

Process a specific QASM file:

```bash
python main.py --qasm-file path/to/circuit.qasm
```

### 🎛️ Command Line Interface

Use the interactive CLI:

```bash
python cli.py
```

## Examples

### Basic Circuit Processing

```python
from main import HDHDeploymentManager
from circuit_examples import HDHCircuitLibrary

# Initialize deployment manager
manager = HDHDeploymentManager(output_dir="my_results")

# Create example circuit
library = HDHCircuitLibrary()
bell_circuit = library.bell_state()

# Process circuit
result = manager.process_circuit(bell_circuit, save_plots=True)
print(f"Processed {result['circuit_name']}: {result['hdh_stats']['nodes']} nodes")
```

### Benchmarking Suite

```python
from benchmark import HDHBenchmarkSuite

# Initialize benchmark suite
benchmark = HDHBenchmarkSuite(repetitions=3)

# Run comprehensive benchmarks
report = benchmark.run_full_benchmark()
print(f"Benchmarked {report['benchmark_summary']['total_circuits']} circuits")
```

## Circuit Library

The deployment includes a comprehensive quantum circuit library:

### Basic Quantum States
- **Bell States**: All four Bell state variants
- **GHZ States**: Multi-qubit entangled states
- **W States**: Symmetric superposition states

### Quantum Algorithms
- **Quantum Fourier Transform (QFT)**: Efficient Fourier transform implementation
- **Grover's Algorithm**: Quantum search algorithm
- **Deutsch-Jozsa Algorithm**: Quantum function evaluation
- **Shor's Algorithm**: Period finding (simplified)

### Quantum Protocols
- **Quantum Teleportation**: State transfer protocol
- **Quantum Error Correction**: 3-qubit bit-flip code

### Variational Algorithms
- **VQE (Variational Quantum Eigensolver)**: Quantum optimization
- **QAOA (Quantum Approximate Optimization Algorithm)**: Combinatorial optimization

### Random Circuits
- **Parameterized random circuits**: For benchmarking and testing

## Configuration

Customize the deployment using `config.yaml`:

```yaml
# Logging configuration
logging:
  level: INFO
  file: "hdh_deployment.log"

# Output settings
output:
  directory: "hdh_results"
  save_plots: true
  plot_dpi: 300

# Circuit processing
circuits:
  max_qubits: 10
  default_partitions: 3
  enable_visualization: true

# Performance settings
performance:
  timeout_seconds: 300
  max_memory_gb: 8
```
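
How the scripts consume this file is up to `main.py`; a minimal sketch of loading it with PyYAML (an assumption, since the loader itself is not shown here):

```python
import yaml  # provided by the PyYAML package

# Read the deployment configuration; keys mirror config.yaml above
with open("config.yaml") as f:
    config = yaml.safe_load(f)

max_qubits = config["circuits"]["max_qubits"]
output_dir = config["output"]["directory"]
print(f"Processing up to {max_qubits} qubits, writing to {output_dir}/")
```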

## Docker Deployment

### Build and Run

```bash
# Build Docker image
docker build -t hdh-deployment .

# Run deployment (results are written to /app/hdh_results inside the container)
docker run -v $(pwd)/hdh_results:/app/hdh_results hdh-deployment

# Run with custom configuration
docker run -v $(pwd)/config.yaml:/app/config.yaml hdh-deployment
```

### Docker Compose

```bash
# Start complete deployment stack
docker-compose up

# Run benchmarks
docker-compose run hdh-benchmark python benchmark.py --suite all
```

## API Reference

### HDHDeploymentManager

Main deployment management class:

```python
manager = HDHDeploymentManager(
    output_dir="results",    # Output directory
    log_level="INFO"         # Logging level
)

# Process quantum circuit
result = manager.process_circuit(quantum_circuit)

# Process QASM file
result = manager.process_qasm_file("circuit.qasm")

# Run comprehensive demo
summary = manager.run_comprehensive_demo()
```

### HDHBenchmarkSuite

Performance benchmarking suite:

```python
benchmark = HDHBenchmarkSuite(
    output_dir="benchmark_results",
    repetitions=3
)

# Run specific benchmark suites
results = {
    "scalability": benchmark.run_scalability_benchmark(),
    "algorithms": benchmark.run_algorithm_benchmark(),
}

# Generate performance plots from the collected suites
benchmark.generate_performance_plots(results)
```

### HDHCircuitLibrary

Quantum circuit examples library:

```python
library = HDHCircuitLibrary()

# Get individual circuits
bell = library.bell_state()
ghz = library.ghz_state(4)
qft = library.qft_circuit(3)

# Get all examples
examples = library.get_all_examples()

# Get benchmark suite
benchmark_circuits = library.get_benchmark_suite()
```

## Results and Output

The deployment generates comprehensive results:

### Directory Structure
```
hdh_results/
├── hdh_deployment.log           # Detailed logging
├── deployment_results.json      # Processing results
├── Bell_State_hdh.png           # Circuit visualizations
├── GHZ-3_hdh.png
├── QFT-3_hdh.png
└── ...

benchmark_results/
├── benchmark.log                # Benchmark logging
├── benchmark_report.json        # Detailed benchmark data
├── scaling_performance.png      # Performance scaling plots
├── algorithm_comparison.png     # Algorithm comparison
├── memory_analysis.png          # Memory usage analysis
└── performance_complexity.png   # Complexity analysis
```

### Performance Metrics

The deployment tracks comprehensive performance metrics (a sketch for reading them back follows the list):

- **Conversion time**: Time to convert circuits to HDH
- **Memory usage**: Peak memory consumption
- **HDH statistics**: Nodes, edges, timesteps
- **Partitioning metrics**: Cut cost, parallelism analysis
- **Scalability data**: Performance vs circuit size
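
To pull these numbers out of a completed run, `benchmark_report.json` stores a `report` section whose keys are defined by `generate_report` in `benchmark.py`, so the following sketch only assumes the default output directory:

```python
import json

# Load the report written by benchmark.py
with open("benchmark_results/benchmark_report.json") as f:
    data = json.load(f)

stats = data["report"]["performance_statistics"]
print(f"Mean conversion time: {stats['conversion_time']['mean']:.4f} s")
print(f"Mean peak memory:     {stats['memory_usage']['mean']:.1f} MB")
print(f"Largest HDH:          {data['report']['scalability_analysis']['largest_hdh_nodes']} nodes")
```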

## Contributing

We welcome contributions! Here's how you can help:

1. **Report Issues**: Found a bug? Report it!
2. **Add Examples**: Contribute new quantum circuit examples
3. **Improve Performance**: Optimize benchmarking and analysis
4. **Documentation**: Help improve documentation
5. **Testing**: Add more comprehensive tests

### Development Guidelines

```bash
# Run tests
pytest tests/

# Format code
black .
isort .
flake8 .

# Type checking
mypy .
```

## Troubleshooting

### Common Issues

**Import Error**: "No module named 'hdh'"
```bash
# Ensure HDH is installed
pip install -e ../HDH
```

**Memory Issues**: Large circuits consuming too much memory
```bash
# Reduce circuit size or use configuration limits
python main.py --max-qubits 6
```

**Visualization Errors**: Matplotlib backend issues
```bash
# Set backend explicitly
export MPLBACKEND=Agg
```

### Performance Tips

1. **Limit circuit size**: Start with smaller circuits
2. **Use configuration**: Customize limits in config.yaml
3. **Monitor memory**: Use the memory profiling features
4. **Batch processing**: Process circuits in batches for large datasets

## Acknowledgments

This deployment example was created to showcase the capabilities of the HDH library.

**Special recognition goes to [Maria Gragera Garces](https://github.com/grageragarces) for her outstanding work developing the HDH library. Her innovative approach to quantum computation analysis through hybrid dependency hypergraphs has made this comprehensive deployment example possible.** 🙏

### References

- **HDH Library**: [GitHub Repository](https://github.com/grageragarces/hdh)
- **Documentation**: [HDH Documentation](https://grageragarces.github.io/HDH/)
- **PyPI Package**: [hdh](https://pypi.org/project/hdh/)

## License

This deployment example is provided under the MIT License, consistent with the HDH library.

## Support

For questions and support:

- **HDH Library Issues**: [GitHub Issues](https://github.com/grageragarces/hdh/issues)
- **Deployment Example**: Create an issue in this repository
- **General Questions**: Check the HDH documentation

---

*Built with ❤️ for the quantum computing community*

*Thank you Maria for making quantum computation analysis more accessible through HDH! 🌟*

benchmark.py (Normal file, 729 lines)
@@ -0,0 +1,729 @@
#!/usr/bin/env python3
"""
HDH Performance Benchmarking Suite
===================================

Comprehensive benchmarking and performance analysis for the HDH library.
This script evaluates HDH performance across different circuit types, sizes,
and complexity levels.

Author: HDH Deployment Team
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!

Features:
- Circuit conversion performance benchmarking
- Memory usage analysis
- Scalability testing
- Partitioning algorithm evaluation
- Comparative analysis across circuit types
- Statistical analysis and reporting
"""

import os
import sys
import time
import json
import logging
import argparse
import traceback
from pathlib import Path
from typing import Dict, List, Tuple, Any, Optional
from datetime import datetime
from dataclasses import dataclass, asdict
from statistics import mean, median, stdev
import matplotlib.pyplot as plt
import numpy as np

# Memory and performance monitoring
import psutil
import gc
from memory_profiler import profile

# Add HDH to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'HDH')))

# HDH imports
from hdh import HDH, plot_hdh
from hdh.converters.qiskit import from_qiskit
from hdh.converters.qasm import from_qasm
from hdh.passes.cut import compute_cut, cost, partition_sizes, compute_parallelism_by_time

# Circuit examples
from circuit_examples import HDHCircuitLibrary


@dataclass
class BenchmarkResult:
    """Data structure for storing benchmark results."""
    circuit_name: str
    num_qubits: int
    circuit_depth: int
    circuit_size: int
    conversion_time: float
    memory_peak_mb: float
    hdh_nodes: int
    hdh_edges: int
    hdh_timesteps: int
    partitioning_time: float
    partition_cost: float
    visualization_time: Optional[float] = None
    error_message: Optional[str] = None
    success: bool = True


class HDHBenchmarkSuite:
    """
    Comprehensive benchmarking suite for HDH performance evaluation.
    """

    def __init__(self, output_dir: str = "benchmark_results", repetitions: int = 3):
        """Initialize the benchmark suite."""
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(exist_ok=True)
        self.repetitions = repetitions

        # Setup logging
        self.setup_logging()
        self.logger = logging.getLogger(__name__)

        # Results storage
        self.results: List[BenchmarkResult] = []

        # Circuit library
        self.circuit_library = HDHCircuitLibrary()

        self.logger.info("HDH Benchmark Suite initialized")

    def setup_logging(self):
        """Configure logging for benchmarking."""
        log_file = self.output_dir / "benchmark.log"

        logging.basicConfig(
            level=logging.INFO,
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler(sys.stdout)
            ]
        )

    def get_memory_usage(self) -> float:
        """Get current memory usage in MB."""
        process = psutil.Process()
        return process.memory_info().rss / 1024 / 1024

    def benchmark_circuit_conversion(self, circuit, circuit_name: str) -> BenchmarkResult:
        """Benchmark a single circuit conversion to HDH."""
        self.logger.info(f"Benchmarking circuit: {circuit_name}")

        # Initial memory measurement
        gc.collect()  # Force garbage collection
        initial_memory = self.get_memory_usage()

        try:
            # Time the conversion
            start_time = time.perf_counter()
            hdh = from_qiskit(circuit)
            conversion_time = time.perf_counter() - start_time

            # Memory peak measurement
            peak_memory = self.get_memory_usage()
            memory_used = peak_memory - initial_memory

            # HDH statistics
            hdh_nodes = len(hdh.S)
            hdh_edges = len(hdh.C)
            hdh_timesteps = len(hdh.T)

            # Partitioning benchmark (if applicable)
            partitioning_time = 0
            partition_cost = 0

            if hdh_nodes > 1:
                try:
                    num_parts = min(3, max(2, hdh_nodes // 2))
                    start_partition = time.perf_counter()
                    partitions = compute_cut(hdh, num_parts)
                    partitioning_time = time.perf_counter() - start_partition
                    partition_cost = cost(hdh, partitions)
                except Exception as e:
                    self.logger.warning(f"Partitioning failed for {circuit_name}: {str(e)}")

            # Visualization benchmark (optional)
            visualization_time = None
            try:
                start_vis = time.perf_counter()
                # Don't actually save, just measure rendering time
                plot_hdh(hdh, save_path=None)
                plt.close('all')  # Clean up
                visualization_time = time.perf_counter() - start_vis
            except Exception as e:
                self.logger.warning(f"Visualization failed for {circuit_name}: {str(e)}")

            return BenchmarkResult(
                circuit_name=circuit_name,
                num_qubits=circuit.num_qubits,
                circuit_depth=circuit.depth(),
                circuit_size=circuit.size(),
                conversion_time=conversion_time,
                memory_peak_mb=memory_used,
                hdh_nodes=hdh_nodes,
                hdh_edges=hdh_edges,
                hdh_timesteps=hdh_timesteps,
                partitioning_time=partitioning_time,
                partition_cost=partition_cost,
                visualization_time=visualization_time,
                success=True
            )

        except Exception as e:
            self.logger.error(f"Benchmark failed for {circuit_name}: {str(e)}")
            self.logger.debug(traceback.format_exc())

            return BenchmarkResult(
                circuit_name=circuit_name,
                num_qubits=circuit.num_qubits,
                circuit_depth=circuit.depth(),
                circuit_size=circuit.size(),
                conversion_time=0,
                memory_peak_mb=0,
                hdh_nodes=0,
                hdh_edges=0,
                hdh_timesteps=0,
                partitioning_time=0,
                partition_cost=0,
                error_message=str(e),
                success=False
            )

    def run_scalability_benchmark(self) -> List[BenchmarkResult]:
        """Run scalability benchmarks with varying circuit sizes."""
        self.logger.info("Running scalability benchmark")

        results = []

        # Test different qubit counts
        qubit_counts = [2, 3, 4, 5, 6, 7, 8]

        for n_qubits in qubit_counts:
            # Test different circuit types
            test_circuits = [
                (self.circuit_library.ghz_state(n_qubits), f"GHZ-{n_qubits}"),
                (self.circuit_library.qft_circuit(min(n_qubits, 6)), f"QFT-{min(n_qubits, 6)}"),  # Limit QFT size
                (self.circuit_library.random_circuit(n_qubits, n_qubits * 2, seed=42), f"Random-{n_qubits}")
            ]

            for circuit, name in test_circuits:
                if circuit.num_qubits <= 8:  # Safety limit
                    # Run multiple repetitions
                    repetition_results = []
                    for rep in range(self.repetitions):
                        result = self.benchmark_circuit_conversion(circuit, f"{name}-rep{rep}")
                        repetition_results.append(result)

                    # Average the repetitions
                    if repetition_results and any(r.success for r in repetition_results):
                        successful_results = [r for r in repetition_results if r.success]
                        if successful_results:
                            avg_result = self.average_results(successful_results, name)
                            results.append(avg_result)

        return results

    def run_algorithm_benchmark(self) -> List[BenchmarkResult]:
        """Benchmark specific quantum algorithms."""
        self.logger.info("Running algorithm benchmark")

        results = []

        # Algorithm test suite
        algorithms = [
            (self.circuit_library.bell_state(), "Bell State"),
            (self.circuit_library.ghz_state(4), "GHZ-4"),
            (self.circuit_library.w_state(4), "W-4"),
            (self.circuit_library.qft_circuit(4), "QFT-4"),
            (self.circuit_library.grover_search(3), "Grover-3"),
            (self.circuit_library.deutsch_jozsa(4), "Deutsch-Jozsa"),
            (self.circuit_library.quantum_teleportation(), "Teleportation"),
            (self.circuit_library.vqe_ansatz(4, 2), "VQE"),
            (self.circuit_library.quantum_error_correction_3bit(), "QEC-3bit")
        ]

        for circuit, name in algorithms:
            repetition_results = []
            for rep in range(self.repetitions):
                result = self.benchmark_circuit_conversion(circuit, f"{name}-rep{rep}")
                repetition_results.append(result)

            # Average the repetitions
            if repetition_results and any(r.success for r in repetition_results):
                successful_results = [r for r in repetition_results if r.success]
                if successful_results:
                    avg_result = self.average_results(successful_results, name)
                    results.append(avg_result)

        return results

    def run_random_circuit_benchmark(self, max_qubits: int = 6, max_depth: int = 20) -> List[BenchmarkResult]:
        """Benchmark random circuits of varying complexity."""
        self.logger.info("Running random circuit benchmark")

        results = []

        # Generate random circuits with different parameters
        test_configs = [
            (3, 5), (3, 10), (4, 5), (4, 10), (5, 8), (6, 6)
        ]

        seeds = [42, 123, 456]  # Multiple seeds for variety

        for n_qubits, depth in test_configs:
            if n_qubits <= max_qubits and depth <= max_depth:
                for seed in seeds:
                    circuit = self.circuit_library.random_circuit(n_qubits, depth, seed)
                    name = f"Random-{n_qubits}q-{depth}d-s{seed}"

                    result = self.benchmark_circuit_conversion(circuit, name)
                    if result.success:
                        results.append(result)

        return results

    def average_results(self, results: List[BenchmarkResult], name: str) -> BenchmarkResult:
        """Average multiple benchmark results."""
        if not results:
            raise ValueError("No results to average")

        # Take the first result as template
        template = results[0]

        # Average numerical fields
        avg_conversion_time = mean([r.conversion_time for r in results])
        avg_memory = mean([r.memory_peak_mb for r in results])
        avg_partitioning_time = mean([r.partitioning_time for r in results])
        avg_partition_cost = mean([r.partition_cost for r in results])

        avg_vis_time = None
        vis_times = [r.visualization_time for r in results if r.visualization_time is not None]
        if vis_times:
            avg_vis_time = mean(vis_times)

        return BenchmarkResult(
            circuit_name=name,
            num_qubits=template.num_qubits,
            circuit_depth=template.circuit_depth,
            circuit_size=template.circuit_size,
            conversion_time=avg_conversion_time,
            memory_peak_mb=avg_memory,
            hdh_nodes=template.hdh_nodes,
            hdh_edges=template.hdh_edges,
            hdh_timesteps=template.hdh_timesteps,
            partitioning_time=avg_partitioning_time,
            partition_cost=avg_partition_cost,
            visualization_time=avg_vis_time,
            success=True
        )

    def run_comprehensive_benchmark(self) -> Dict[str, List[BenchmarkResult]]:
        """Run all benchmark suites."""
        self.logger.info("Starting comprehensive HDH benchmark")

        benchmark_results = {
            'scalability': [],
            'algorithms': [],
            'random_circuits': []
        }

        # Run individual benchmark suites
        try:
            benchmark_results['scalability'] = self.run_scalability_benchmark()
        except Exception as e:
            self.logger.error(f"Scalability benchmark failed: {str(e)}")

        try:
            benchmark_results['algorithms'] = self.run_algorithm_benchmark()
        except Exception as e:
            self.logger.error(f"Algorithm benchmark failed: {str(e)}")

        try:
            benchmark_results['random_circuits'] = self.run_random_circuit_benchmark()
        except Exception as e:
            self.logger.error(f"Random circuit benchmark failed: {str(e)}")

        # Store all results
        for suite_results in benchmark_results.values():
            self.results.extend(suite_results)

        return benchmark_results

def generate_performance_plots(self, results: Dict[str, List[BenchmarkResult]]):
|
||||
"""Generate performance analysis plots."""
|
||||
self.logger.info("Generating performance plots")
|
||||
|
||||
# Scaling plot
|
||||
self.plot_scaling_performance(results['scalability'])
|
||||
|
||||
# Algorithm comparison
|
||||
self.plot_algorithm_comparison(results['algorithms'])
|
||||
|
||||
# Memory usage analysis
|
||||
all_results = []
|
||||
for suite_results in results.values():
|
||||
all_results.extend(suite_results)
|
||||
self.plot_memory_analysis(all_results)
|
||||
|
||||
# Performance vs complexity
|
||||
self.plot_performance_vs_complexity(all_results)
|
||||
|
||||
def plot_scaling_performance(self, results: List[BenchmarkResult]):
|
||||
"""Plot performance scaling with circuit size."""
|
||||
if not results:
|
||||
return
|
||||
|
||||
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
|
||||
|
||||
# Group by circuit type
|
||||
circuit_types = {}
|
||||
for result in results:
|
||||
circuit_type = result.circuit_name.split('-')[0]
|
||||
if circuit_type not in circuit_types:
|
||||
circuit_types[circuit_type] = []
|
||||
circuit_types[circuit_type].append(result)
|
||||
|
||||
# Plot 1: Conversion time vs qubits
|
||||
for circuit_type, type_results in circuit_types.items():
|
||||
qubits = [r.num_qubits for r in type_results]
|
||||
times = [r.conversion_time for r in type_results]
|
||||
ax1.plot(qubits, times, 'o-', label=circuit_type, alpha=0.7)
|
||||
|
||||
ax1.set_xlabel('Number of Qubits')
|
||||
ax1.set_ylabel('Conversion Time (s)')
|
||||
ax1.set_title('HDH Conversion Time Scaling')
|
||||
ax1.legend()
|
||||
ax1.grid(True, alpha=0.3)
|
||||
ax1.set_yscale('log')
|
||||
|
||||
# Plot 2: Memory usage vs qubits
|
||||
for circuit_type, type_results in circuit_types.items():
|
||||
qubits = [r.num_qubits for r in type_results]
|
||||
memory = [r.memory_peak_mb for r in type_results]
|
||||
ax2.plot(qubits, memory, 's-', label=circuit_type, alpha=0.7)
|
||||
|
||||
ax2.set_xlabel('Number of Qubits')
|
||||
ax2.set_ylabel('Peak Memory Usage (MB)')
|
||||
ax2.set_title('Memory Usage Scaling')
|
||||
ax2.legend()
|
||||
ax2.grid(True, alpha=0.3)
|
||||
|
||||
# Plot 3: HDH nodes vs circuit size
|
||||
all_sizes = [r.circuit_size for r in results]
|
||||
all_nodes = [r.hdh_nodes for r in results]
|
||||
ax3.scatter(all_sizes, all_nodes, alpha=0.6)
|
||||
ax3.set_xlabel('Circuit Size (gates)')
|
||||
ax3.set_ylabel('HDH Nodes')
|
||||
ax3.set_title('HDH Representation Size')
|
||||
ax3.grid(True, alpha=0.3)
|
||||
|
||||
# Plot 4: Partitioning cost distribution
|
||||
costs = [r.partition_cost for r in results if r.partition_cost > 0]
|
||||
if costs:
|
||||
ax4.hist(costs, bins=20, alpha=0.7, color='skyblue', edgecolor='black')
|
||||
ax4.set_xlabel('Partition Cost')
|
||||
ax4.set_ylabel('Frequency')
|
||||
ax4.set_title('Partitioning Cost Distribution')
|
||||
ax4.grid(True, alpha=0.3)
|
||||
|
||||
plt.tight_layout()
|
||||
plt.savefig(self.output_dir / 'scaling_performance.png', dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
def plot_algorithm_comparison(self, results: List[BenchmarkResult]):
|
||||
"""Plot algorithm-specific performance comparison."""
|
||||
if not results:
|
||||
return
|
||||
|
||||
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
|
||||
|
||||
names = [r.circuit_name for r in results]
|
||||
conversion_times = [r.conversion_time for r in results]
|
||||
memory_usage = [r.memory_peak_mb for r in results]
|
||||
|
||||
# Conversion time comparison
|
||||
bars1 = ax1.bar(range(len(names)), conversion_times, alpha=0.7, color='lightcoral')
|
||||
ax1.set_xlabel('Algorithm')
|
||||
ax1.set_ylabel('Conversion Time (s)')
|
||||
ax1.set_title('HDH Conversion Time by Algorithm')
|
||||
ax1.set_xticks(range(len(names)))
|
||||
ax1.set_xticklabels(names, rotation=45, ha='right')
|
||||
ax1.grid(True, alpha=0.3, axis='y')
|
||||
|
||||
# Add value labels on bars
|
||||
for bar, time in zip(bars1, conversion_times):
|
||||
height = bar.get_height()
|
||||
ax1.text(bar.get_x() + bar.get_width()/2., height,
|
||||
f'{time:.3f}s', ha='center', va='bottom', fontsize=8)
|
||||
|
||||
# Memory usage comparison
|
||||
bars2 = ax2.bar(range(len(names)), memory_usage, alpha=0.7, color='lightblue')
|
||||
ax2.set_xlabel('Algorithm')
|
||||
ax2.set_ylabel('Peak Memory (MB)')
|
||||
ax2.set_title('Memory Usage by Algorithm')
|
||||
ax2.set_xticks(range(len(names)))
|
||||
ax2.set_xticklabels(names, rotation=45, ha='right')
|
||||
ax2.grid(True, alpha=0.3, axis='y')
|
||||
|
||||
# Add value labels on bars
|
||||
for bar, mem in zip(bars2, memory_usage):
|
||||
height = bar.get_height()
|
||||
ax2.text(bar.get_x() + bar.get_width()/2., height,
|
||||
f'{mem:.1f}MB', ha='center', va='bottom', fontsize=8)
|
||||
|
||||
plt.tight_layout()
|
||||
plt.savefig(self.output_dir / 'algorithm_comparison.png', dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
def plot_memory_analysis(self, results: List[BenchmarkResult]):
|
||||
"""Plot memory usage analysis."""
|
||||
if not results:
|
||||
return
|
||||
|
||||
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 6))
|
||||
|
||||
# Memory vs HDH size
|
||||
hdh_sizes = [r.hdh_nodes + r.hdh_edges for r in results]
|
||||
memory_usage = [r.memory_peak_mb for r in results]
|
||||
|
||||
ax1.scatter(hdh_sizes, memory_usage, alpha=0.6, color='green')
|
||||
ax1.set_xlabel('HDH Size (nodes + edges)')
|
||||
ax1.set_ylabel('Peak Memory (MB)')
|
||||
ax1.set_title('Memory Usage vs HDH Size')
|
||||
ax1.grid(True, alpha=0.3)
|
||||
|
||||
# Memory efficiency (memory per HDH element)
|
||||
efficiency = [mem / max(size, 1) for mem, size in zip(memory_usage, hdh_sizes)]
|
||||
|
||||
ax2.hist(efficiency, bins=20, alpha=0.7, color='orange', edgecolor='black')
|
||||
ax2.set_xlabel('Memory per HDH Element (MB)')
|
||||
ax2.set_ylabel('Frequency')
|
||||
ax2.set_title('Memory Efficiency Distribution')
|
||||
ax2.grid(True, alpha=0.3)
|
||||
|
||||
plt.tight_layout()
|
||||
plt.savefig(self.output_dir / 'memory_analysis.png', dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
def plot_performance_vs_complexity(self, results: List[BenchmarkResult]):
|
||||
"""Plot performance vs circuit complexity."""
|
||||
if not results:
|
||||
return
|
||||
|
||||
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))
|
||||
|
||||
# Performance vs circuit depth
|
||||
depths = [r.circuit_depth for r in results]
|
||||
times = [r.conversion_time for r in results]
|
||||
|
||||
ax1.scatter(depths, times, alpha=0.6)
|
||||
ax1.set_xlabel('Circuit Depth')
|
||||
ax1.set_ylabel('Conversion Time (s)')
|
||||
ax1.set_title('Performance vs Circuit Depth')
|
||||
ax1.grid(True, alpha=0.3)
|
||||
ax1.set_yscale('log')
|
||||
|
||||
# Performance vs circuit size
|
||||
sizes = [r.circuit_size for r in results]
|
||||
|
||||
ax2.scatter(sizes, times, alpha=0.6, color='red')
|
||||
ax2.set_xlabel('Circuit Size (gates)')
|
||||
ax2.set_ylabel('Conversion Time (s)')
|
||||
ax2.set_title('Performance vs Circuit Size')
|
||||
ax2.grid(True, alpha=0.3)
|
||||
ax2.set_yscale('log')
|
||||
|
||||
# HDH efficiency
|
||||
hdh_efficiency = [r.hdh_nodes / max(r.circuit_size, 1) for r in results]
|
||||
|
||||
ax3.scatter(sizes, hdh_efficiency, alpha=0.6, color='purple')
|
||||
ax3.set_xlabel('Circuit Size (gates)')
|
||||
ax3.set_ylabel('HDH Nodes / Circuit Gates')
|
||||
ax3.set_title('HDH Representation Efficiency')
|
||||
ax3.grid(True, alpha=0.3)
|
||||
|
||||
# Partitioning efficiency
|
||||
partition_efficiency = [r.partitioning_time / max(r.conversion_time, 0.001) for r in results if r.partitioning_time > 0]
|
||||
hdh_sizes_with_partition = [r.hdh_nodes for r in results if r.partitioning_time > 0]
|
||||
|
||||
if partition_efficiency:
|
||||
ax4.scatter(hdh_sizes_with_partition, partition_efficiency, alpha=0.6, color='brown')
|
||||
ax4.set_xlabel('HDH Nodes')
|
||||
ax4.set_ylabel('Partitioning Time / Conversion Time')
|
||||
ax4.set_title('Partitioning Overhead')
|
||||
ax4.grid(True, alpha=0.3)
|
||||
ax4.set_yscale('log')
|
||||
|
||||
plt.tight_layout()
|
||||
plt.savefig(self.output_dir / 'performance_complexity.png', dpi=300, bbox_inches='tight')
|
||||
plt.close()
|
||||
|
||||
def generate_report(self, results: Dict[str, List[BenchmarkResult]]) -> Dict[str, Any]:
|
||||
"""Generate comprehensive benchmark report."""
|
||||
self.logger.info("Generating benchmark report")
|
||||
|
||||
all_results = []
|
||||
for suite_results in results.values():
|
||||
all_results.extend([r for r in suite_results if r.success])
|
||||
|
||||
if not all_results:
|
||||
return {"error": "No successful benchmark results"}
|
||||
|
||||
# Basic statistics
|
||||
conversion_times = [r.conversion_time for r in all_results]
|
||||
memory_usage = [r.memory_peak_mb for r in all_results]
|
||||
hdh_nodes = [r.hdh_nodes for r in all_results]
|
||||
|
||||
report = {
|
||||
"benchmark_summary": {
|
||||
"total_circuits": len(all_results),
|
||||
"successful_circuits": len(all_results),
|
||||
"benchmark_date": datetime.now().isoformat(),
|
||||
"repetitions": self.repetitions
|
||||
},
|
||||
"performance_statistics": {
|
||||
"conversion_time": {
|
||||
"mean": mean(conversion_times),
|
||||
"median": median(conversion_times),
|
||||
"min": min(conversion_times),
|
||||
"max": max(conversion_times),
|
||||
"std": stdev(conversion_times) if len(conversion_times) > 1 else 0
|
||||
},
|
||||
"memory_usage": {
|
||||
"mean": mean(memory_usage),
|
||||
"median": median(memory_usage),
|
||||
"min": min(memory_usage),
|
||||
"max": max(memory_usage),
|
||||
"std": stdev(memory_usage) if len(memory_usage) > 1 else 0
|
||||
},
|
||||
"hdh_size": {
|
||||
"mean_nodes": mean(hdh_nodes),
|
||||
"median_nodes": median(hdh_nodes),
|
||||
"min_nodes": min(hdh_nodes),
|
||||
"max_nodes": max(hdh_nodes)
|
||||
}
|
||||
},
|
||||
"scalability_analysis": {
|
||||
"largest_circuit_qubits": max([r.num_qubits for r in all_results]),
|
||||
"largest_circuit_size": max([r.circuit_size for r in all_results]),
|
||||
"largest_hdh_nodes": max(hdh_nodes)
|
||||
},
|
||||
"suite_results": {
|
||||
suite_name: {
|
||||
"count": len(suite_results),
|
||||
"avg_conversion_time": mean([r.conversion_time for r in suite_results]) if suite_results else 0,
|
||||
"avg_memory": mean([r.memory_peak_mb for r in suite_results]) if suite_results else 0
|
||||
}
|
||||
for suite_name, suite_results in results.items() if suite_results
|
||||
}
|
||||
}
|
||||
|
||||
# Save detailed results
|
||||
detailed_results = {
|
||||
"report": report,
|
||||
"detailed_results": [asdict(r) for r in all_results]
|
||||
}
|
||||
|
||||
report_file = self.output_dir / "benchmark_report.json"
|
||||
with open(report_file, 'w') as f:
|
||||
json.dump(detailed_results, f, indent=2, default=str)
|
||||
|
||||
return report
|
||||
|
||||
def run_full_benchmark(self) -> Dict[str, Any]:
|
||||
"""Run complete benchmark suite and generate report."""
|
||||
start_time = time.time()
|
||||
|
||||
# Run benchmarks
|
||||
results = self.run_comprehensive_benchmark()
|
||||
|
||||
# Generate visualizations
|
||||
self.generate_performance_plots(results)
|
||||
|
||||
# Generate report
|
||||
report = self.generate_report(results)
|
||||
|
||||
total_time = time.time() - start_time
|
||||
report["benchmark_summary"]["total_benchmark_time"] = total_time
|
||||
|
||||
self.logger.info(f"Benchmark completed in {total_time:.2f} seconds")
|
||||
return report
|
||||
|
||||
|
||||
def main():
|
||||
"""Main benchmarking function."""
|
||||
parser = argparse.ArgumentParser(description="HDH Performance Benchmark Suite")
|
||||
parser.add_argument("--output-dir", default="benchmark_results", help="Output directory")
|
||||
parser.add_argument("--repetitions", type=int, default=3, help="Number of repetitions per test")
|
||||
parser.add_argument("--suite", choices=["scalability", "algorithms", "random", "all"],
|
||||
default="all", help="Benchmark suite to run")
|
||||
parser.add_argument("--max-qubits", type=int, default=8, help="Maximum qubits for scaling tests")
|
||||
parser.add_argument("--verbose", action="store_true", help="Verbose logging")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
# Initialize benchmark suite
|
||||
benchmark = HDHBenchmarkSuite(
|
||||
output_dir=args.output_dir,
|
||||
repetitions=args.repetitions
|
||||
)
|
||||
|
||||
if args.verbose:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
|
||||
try:
|
||||
print("🚀 HDH Performance Benchmark Suite")
|
||||
print("=" * 50)
|
||||
print("Special thanks to Maria Gragera Garces for the HDH library!")
|
||||
print()
|
||||
|
||||
if args.suite == "all":
|
||||
report = benchmark.run_full_benchmark()
|
||||
else:
|
||||
# Run specific benchmark suite
|
||||
if args.suite == "scalability":
|
||||
results = {"scalability": benchmark.run_scalability_benchmark()}
|
||||
elif args.suite == "algorithms":
|
||||
results = {"algorithms": benchmark.run_algorithm_benchmark()}
|
||||
elif args.suite == "random":
|
||||
results = {"random": benchmark.run_random_circuit_benchmark(args.max_qubits)}
|
||||
|
||||
benchmark.generate_performance_plots(results)
|
||||
report = benchmark.generate_report(results)
|
||||
|
||||
# Print summary
|
||||
if "error" not in report:
|
||||
summary = report["benchmark_summary"]
|
||||
perf_stats = report["performance_statistics"]
|
||||
|
||||
print(f"\n📊 Benchmark Results Summary:")
|
||||
print(f"Circuits tested: {summary['total_circuits']}")
|
||||
print(f"Success rate: 100%")
|
||||
print(f"Average conversion time: {perf_stats['conversion_time']['mean']:.4f}s")
|
||||
print(f"Average memory usage: {perf_stats['memory_usage']['mean']:.2f}MB")
|
||||
print(f"Largest circuit: {report['scalability_analysis']['largest_circuit_qubits']} qubits")
|
||||
print(f"Total benchmark time: {summary.get('total_benchmark_time', 0):.2f}s")
|
||||
|
||||
print(f"\n📁 Results saved in: {benchmark.output_dir}")
|
||||
print("📈 Performance plots generated")
|
||||
print("📋 Detailed report: benchmark_report.json")
|
||||
else:
|
||||
print(f"\n❌ Benchmark failed: {report['error']}")
|
||||
|
||||
except KeyboardInterrupt:
|
||||
print("\n⏹️ Benchmark interrupted by user")
|
||||
except Exception as e:
|
||||
print(f"\n💥 Benchmark failed: {str(e)}")
|
||||
logging.error(traceback.format_exc())
|
||||
raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
455
circuit_examples.py
Normal file
@@ -0,0 +1,455 @@
#!/usr/bin/env python3
|
||||
"""
|
||||
HDH Circuit Examples Library
|
||||
============================
|
||||
|
||||
This module provides a comprehensive collection of quantum circuits for testing
|
||||
and demonstrating the HDH (Hybrid Dependency Hypergraph) library capabilities.
|
||||
|
||||
Author: HDH Deployment Team
|
||||
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!
|
||||
|
||||
The examples include:
|
||||
- Basic quantum circuits (Bell states, GHZ states)
|
||||
- Quantum algorithms (QFT, Grover, Deutsch-Jozsa)
|
||||
- Quantum error correction codes
|
||||
- Random circuits for benchmarking
|
||||
- Real-world quantum applications
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
from typing import List, Dict, Tuple, Optional
|
||||
from pathlib import Path
|
||||
import numpy as np
|
||||
|
||||
# Add HDH to path
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'HDH')))
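# Assumes the HDH repository is checked out as a sibling directory named 'HDH'.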
|
||||
|
||||
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
|
||||
from qiskit.circuit.library import (
QFT, GroverOperator,
TwoLocal, RealAmplitudes, EfficientSU2
)
|
||||
from qiskit.circuit import Parameter
|
||||
import qiskit.circuit.library as qlib
|
||||
|
||||
|
||||
class HDHCircuitLibrary:
|
||||
"""
|
||||
A comprehensive library of quantum circuits for HDH testing and demonstration.
|
||||
"""
|
||||
|
||||
@staticmethod
|
||||
def bell_state() -> QuantumCircuit:
|
||||
"""Create a Bell state (|00⟩ + |11⟩)/√2."""
|
||||
qc = QuantumCircuit(2, 2, name="Bell State")
|
||||
qc.h(0)
|
||||
qc.cx(0, 1)
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def bell_state_variants() -> List[QuantumCircuit]:
|
||||
"""Create all four Bell states."""
|
||||
circuits = []
|
||||
|
||||
# |Φ+⟩ = (|00⟩ + |11⟩)/√2
|
||||
qc1 = QuantumCircuit(2, 2, name="Bell Phi+")
|
||||
qc1.h(0)
|
||||
qc1.cx(0, 1)
|
||||
qc1.measure_all()
|
||||
circuits.append(qc1)
|
||||
|
||||
# |Φ-⟩ = (|00⟩ - |11⟩)/√2
|
||||
qc2 = QuantumCircuit(2, 2, name="Bell Phi-")
|
||||
qc2.h(0)
|
||||
qc2.z(0)
|
||||
qc2.cx(0, 1)
|
||||
qc2.measure_all()
|
||||
circuits.append(qc2)
|
||||
|
||||
# |Ψ+⟩ = (|01⟩ + |10⟩)/√2
|
||||
qc3 = QuantumCircuit(2, 2, name="Bell Psi+")
|
||||
qc3.h(0)
|
||||
qc3.cx(0, 1)
|
||||
qc3.x(1)
|
||||
qc3.measure_all()
|
||||
circuits.append(qc3)
|
||||
|
||||
# |Ψ-⟩ = (|01⟩ - |10⟩)/√2
|
||||
qc4 = QuantumCircuit(2, 2, name="Bell Psi-")
|
||||
qc4.h(0)
|
||||
qc4.z(0)
|
||||
qc4.cx(0, 1)
|
||||
qc4.x(1)
|
||||
qc4.measure_all()
|
||||
circuits.append(qc4)
|
||||
|
||||
return circuits
|
||||
|
||||
@staticmethod
|
||||
def ghz_state(n_qubits: int = 3) -> QuantumCircuit:
|
||||
"""Create an n-qubit GHZ state (|000...⟩ + |111...⟩)/√2."""
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"GHZ-{n_qubits}")
|
||||
qc.h(0)
|
||||
for i in range(1, n_qubits):
|
||||
qc.cx(0, i)
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def w_state(n_qubits: int = 3) -> QuantumCircuit:
|
||||
"""Create an n-qubit W state."""
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"W-{n_qubits}")
|
||||
|
||||
# Create W state using RY rotations
|
||||
qc.ry(2 * np.arccos(np.sqrt((n_qubits - 1) / n_qubits)), 0)
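# This angle leaves qubit 0 with amplitude sqrt(1/n_qubits) on |1> and sqrt((n_qubits-1)/n_qubits) on |0>.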
|
||||
|
||||
for i in range(1, n_qubits):
|
||||
angle = 2 * np.arccos(np.sqrt((n_qubits - i - 1) / (n_qubits - i)))
|
||||
qc.cry(angle, i-1, i)
|
||||
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def qft_circuit(n_qubits: int = 3) -> QuantumCircuit:
|
||||
"""Create a Quantum Fourier Transform circuit."""
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"QFT-{n_qubits}")
|
||||
qft = QFT(n_qubits)
|
||||
qc.compose(qft, inplace=True)
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def grover_search(n_qubits: int = 2, oracle_pattern: str = None) -> QuantumCircuit:
|
||||
"""Create Grover's search algorithm circuit."""
|
||||
if oracle_pattern is None:
|
||||
oracle_pattern = '1' * n_qubits # Search for all 1s
|
||||
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"Grover-{n_qubits}")
|
||||
|
||||
# Initialize superposition
|
||||
qc.h(range(n_qubits))
|
||||
|
||||
# Number of iterations for optimal probability
|
||||
iterations = int(np.pi / 4 * np.sqrt(2**n_qubits))
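# Optimal iteration count for a single marked item: roughly floor(pi/4 * sqrt(N)) with N = 2**n_qubits.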
|
||||
|
||||
for _ in range(iterations):
|
||||
# Oracle: flip phase of target state
|
||||
for i, bit in enumerate(oracle_pattern):
|
||||
if bit == '0':
|
||||
qc.x(i)
|
||||
|
||||
if n_qubits > 1:
|
||||
qc.mcrz(np.pi, list(range(n_qubits-1)), n_qubits-1)
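# Note: mcrz(pi) matches a multi-controlled Z only up to a relative phase; a textbook phase-flip oracle would apply h, mcx, h on the target.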
|
||||
else:
|
||||
qc.rz(np.pi, 0)
|
||||
|
||||
for i, bit in enumerate(oracle_pattern):
|
||||
if bit == '0':
|
||||
qc.x(i)
|
||||
|
||||
# Diffusion operator
|
||||
qc.h(range(n_qubits))
|
||||
qc.x(range(n_qubits))
|
||||
|
||||
if n_qubits > 1:
|
||||
qc.mcrz(np.pi, list(range(n_qubits-1)), n_qubits-1)
|
||||
else:
|
||||
qc.rz(np.pi, 0)
|
||||
|
||||
qc.x(range(n_qubits))
|
||||
qc.h(range(n_qubits))
|
||||
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def deutsch_jozsa(n_qubits: int = 3, balanced: bool = True) -> QuantumCircuit:
|
||||
"""Create Deutsch-Jozsa algorithm circuit."""
|
||||
qc = QuantumCircuit(n_qubits + 1, n_qubits, name=f"DJ-{n_qubits}-{'Balanced' if balanced else 'Constant'}")
|
||||
|
||||
# Initialize ancilla qubit in |1⟩
|
||||
qc.x(n_qubits)
|
||||
|
||||
# Create superposition
|
||||
qc.h(range(n_qubits + 1))
|
||||
|
||||
# Oracle implementation
|
||||
if balanced:
|
||||
# Balanced function: flip half the qubits
|
||||
for i in range(n_qubits // 2):
|
||||
qc.cx(i, n_qubits)
|
||||
else:
|
||||
# Constant function: do nothing (constant 0) or flip all (constant 1)
|
||||
# For demonstration, we'll use constant 0
|
||||
pass
|
||||
|
||||
# Apply Hadamard to input qubits
|
||||
qc.h(range(n_qubits))
|
||||
|
||||
# Measure input qubits
|
||||
qc.measure(range(n_qubits), range(n_qubits))
|
||||
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def quantum_teleportation() -> QuantumCircuit:
|
||||
"""Create quantum teleportation protocol circuit."""
|
||||
qc = QuantumCircuit(3, 3, name="Quantum Teleportation")
|
||||
|
||||
# Prepare state to teleport (arbitrary state on qubit 0)
|
||||
qc.ry(np.pi/4, 0) # Example state
|
||||
|
||||
# Create Bell pair between qubits 1 and 2
|
||||
qc.h(1)
|
||||
qc.cx(1, 2)
|
||||
|
||||
# Bell measurement on qubits 0 and 1
|
||||
qc.cx(0, 1)
|
||||
qc.h(0)
|
||||
qc.measure(0, 0)
|
||||
qc.measure(1, 1)
|
||||
|
||||
# Correction operations
|
||||
qc.cx(1, 2)
|
||||
qc.cz(0, 2)
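# The corrections are applied as gates controlled on the already-measured qubits 0 and 1, which by deferred measurement is equivalent to classically conditioned X/Z corrections.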
|
||||
|
||||
# Measure final state
|
||||
qc.measure(2, 2)
|
||||
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def vqe_ansatz(n_qubits: int = 4, layers: int = 2) -> QuantumCircuit:
|
||||
"""Create a VQE ansatz circuit with parameters."""
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"VQE-{n_qubits}-L{layers}")
|
||||
|
||||
# Use EfficientSU2 as ansatz
|
||||
ansatz = EfficientSU2(n_qubits, reps=layers)
|
||||
qc.compose(ansatz, inplace=True)
|
||||
qc.measure_all()
|
||||
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def qaoa_circuit(n_qubits: int = 4, layers: int = 1) -> QuantumCircuit:
|
||||
"""Create a QAOA circuit for Max-Cut problem."""
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"QAOA-{n_qubits}-L{layers}")
|
||||
|
||||
# Initialize superposition
|
||||
qc.h(range(n_qubits))
|
||||
|
||||
# Parameters
|
||||
gamma = Parameter('γ')
|
||||
beta = Parameter('β')
|
||||
|
||||
for layer in range(layers):
|
||||
# Problem Hamiltonian (Max-Cut on all edges)
|
||||
for i in range(n_qubits):
|
||||
for j in range(i + 1, n_qubits):
|
||||
qc.rzz(gamma, i, j)
|
||||
|
||||
# Mixer Hamiltonian
|
||||
for i in range(n_qubits):
|
||||
qc.rx(beta, i)
|
||||
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def shor_period_finding(N: int = 15, a: int = 7) -> QuantumCircuit:
|
||||
"""Create simplified Shor's algorithm period finding circuit."""
|
||||
# Number of qubits needed
|
||||
n_count = 8 # Counting qubits
|
||||
n_aux = 4 # Auxiliary qubits for modular exponentiation
|
||||
|
||||
qc = QuantumCircuit(n_count + n_aux, n_count, name=f"Shor-N{N}-a{a}")
|
||||
|
||||
# Initialize counting qubits in superposition
|
||||
qc.h(range(n_count))
|
||||
|
||||
# Initialize auxiliary register to |1⟩
|
||||
qc.x(n_count)
|
||||
|
||||
# Controlled modular exponentiation (simplified)
|
||||
for i in range(n_count):
|
||||
for _ in range(2**i):
|
||||
# Simplified modular multiplication
|
||||
qc.cx(i, n_count)
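# Placeholder only: a full implementation would apply controlled multiplication by a**(2**i) mod N on the auxiliary register.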
|
||||
|
||||
# Quantum Fourier Transform on counting qubits
|
||||
qft = QFT(n_count).inverse()
|
||||
qc.compose(qft, range(n_count), inplace=True)
|
||||
|
||||
# Measure counting qubits
|
||||
qc.measure(range(n_count), range(n_count))
|
||||
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def random_circuit(n_qubits: int, depth: int, seed: int = None) -> QuantumCircuit:
|
||||
"""Generate a random quantum circuit."""
|
||||
if seed is not None:
|
||||
np.random.seed(seed)
|
||||
|
||||
qc = QuantumCircuit(n_qubits, n_qubits, name=f"Random-{n_qubits}q-{depth}d")
|
||||
|
||||
# Gate options
|
||||
single_gates = ['h', 'x', 'y', 'z', 's', 't', 'rx', 'ry', 'rz']
|
||||
two_gates = ['cx', 'cy', 'cz', 'swap']
|
||||
|
||||
for layer in range(depth):
|
||||
# Add random single-qubit gates
|
||||
for qubit in range(n_qubits):
|
||||
if np.random.random() < 0.7: # 70% chance of gate
|
||||
gate = np.random.choice(single_gates)
|
||||
if gate in ['rx', 'ry', 'rz']:
|
||||
angle = np.random.uniform(0, 2*np.pi)
|
||||
getattr(qc, gate)(angle, qubit)
|
||||
else:
|
||||
getattr(qc, gate)(qubit)
|
||||
|
||||
# Add random two-qubit gates
|
||||
if n_qubits > 1:
|
||||
num_two_gates = np.random.randint(0, n_qubits // 2 + 1)
|
||||
for _ in range(num_two_gates):
|
||||
gate = np.random.choice(two_gates)
|
||||
qubits = np.random.choice(n_qubits, 2, replace=False)
|
||||
getattr(qc, gate)(qubits[0], qubits[1])
|
||||
|
||||
qc.measure_all()
|
||||
return qc
|
||||
|
||||
@staticmethod
|
||||
def quantum_error_correction_3bit() -> QuantumCircuit:
|
||||
"""Create a 3-qubit bit-flip error correction circuit."""
|
||||
qc = QuantumCircuit(9, 3, name="QEC-3bit-BitFlip")
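# Qubits 0, 3 and 6 hold the three-qubit repetition code; qubits 1 and 2 act as syndrome ancillas (the remaining qubits are unused).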
|
||||
|
||||
# Encode logical |0⟩ or |1⟩ (start with |+⟩ state)
|
||||
qc.h(0)
|
||||
|
||||
# Encoding
|
||||
qc.cx(0, 3)
|
||||
qc.cx(0, 6)
|
||||
|
||||
# Simulate error (bit flip on qubit 3)
|
||||
qc.x(3)
|
||||
|
||||
# Syndrome measurement
|
||||
qc.cx(0, 1)
|
||||
qc.cx(3, 1)
|
||||
qc.cx(3, 2)
|
||||
qc.cx(6, 2)
|
||||
|
||||
# Error correction (simplified)
|
||||
qc.ccx(1, 2, 3) # Correct bit flip if detected
|
||||
|
||||
# Decoding and measurement
|
||||
qc.cx(0, 3)
|
||||
qc.cx(0, 6)
|
||||
qc.measure([0, 1, 2], [0, 1, 2])
|
||||
|
||||
return qc
|
||||
|
||||
@classmethod
|
||||
def get_all_examples(cls) -> Dict[str, QuantumCircuit]:
|
||||
"""Get all example circuits as a dictionary."""
|
||||
examples = {}
|
||||
|
||||
# Basic states
|
||||
examples['bell_state'] = cls.bell_state()
|
||||
examples['ghz_3'] = cls.ghz_state(3)
|
||||
examples['ghz_4'] = cls.ghz_state(4)
|
||||
examples['w_state'] = cls.w_state(3)
|
||||
|
||||
# Algorithms
|
||||
examples['qft_3'] = cls.qft_circuit(3)
|
||||
examples['qft_4'] = cls.qft_circuit(4)
|
||||
examples['grover_2'] = cls.grover_search(2)
|
||||
examples['deutsch_jozsa'] = cls.deutsch_jozsa(3, balanced=True)
|
||||
|
||||
# Protocols
|
||||
examples['teleportation'] = cls.quantum_teleportation()
|
||||
examples['error_correction'] = cls.quantum_error_correction_3bit()
|
||||
|
||||
# Variational algorithms
|
||||
examples['vqe_ansatz'] = cls.vqe_ansatz(4, 2)
|
||||
examples['qaoa'] = cls.qaoa_circuit(4, 1)
|
||||
|
||||
# Random circuits
|
||||
examples['random_small'] = cls.random_circuit(3, 5, seed=42)
|
||||
examples['random_medium'] = cls.random_circuit(5, 8, seed=42)
|
||||
|
||||
return examples
|
||||
|
||||
@classmethod
|
||||
def get_benchmark_suite(cls) -> List[QuantumCircuit]:
|
||||
"""Get a comprehensive benchmark suite."""
|
||||
circuits = []
|
||||
|
||||
# Scalability test circuits
|
||||
for n in [2, 3, 4, 5]:
|
||||
circuits.append(cls.ghz_state(n))
|
||||
circuits.append(cls.qft_circuit(n))
|
||||
|
||||
for n in [2, 3]:
|
||||
circuits.append(cls.grover_search(n))
|
||||
|
||||
# Algorithm demonstrations
|
||||
circuits.append(cls.deutsch_jozsa(4, balanced=True))
|
||||
circuits.append(cls.quantum_teleportation())
|
||||
circuits.append(cls.vqe_ansatz(4, 2))
|
||||
circuits.append(cls.quantum_error_correction_3bit())
|
||||
|
||||
# Random circuits for stress testing
|
||||
for n_qubits in [3, 5, 7]:
|
||||
for depth in [5, 10]:
|
||||
circuits.append(cls.random_circuit(n_qubits, depth, seed=42))
|
||||
|
||||
return circuits
|
||||
|
||||
|
||||
def save_example_qasm_files(output_dir: str = "qasm_examples"):
|
||||
"""Save example circuits as QASM files."""
|
||||
output_path = Path(output_dir)
|
||||
output_path.mkdir(exist_ok=True)
|
||||
|
||||
library = HDHCircuitLibrary()
|
||||
examples = library.get_all_examples()
|
||||
|
||||
for name, circuit in examples.items():
|
||||
# Remove measurements for QASM export (QASM 2.0 limitation)
|
||||
qasm_circuit = circuit.copy()
|
||||
qasm_circuit.remove_final_measurements(inplace=True)
|
||||
|
||||
qasm_file = output_path / f"{name}.qasm"
|
||||
with open(qasm_file, 'w') as f:
|
||||
f.write(qasm_circuit.qasm())
|
||||
|
||||
print(f"Saved {name} to {qasm_file}")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
"""Demonstration of the circuit library."""
|
||||
print("HDH Circuit Examples Library")
|
||||
print("=" * 50)
|
||||
print("Special thanks to Maria Gragera Garces for the HDH library!")
|
||||
print()
|
||||
|
||||
library = HDHCircuitLibrary()
|
||||
|
||||
# Show all available examples
|
||||
examples = library.get_all_examples()
|
||||
print(f"Available circuits ({len(examples)}):")
|
||||
for name, qc in examples.items():
|
||||
print(f" {name:20} - {qc.num_qubits} qubits, depth {qc.depth()}")
|
||||
|
||||
print(f"\nBenchmark suite: {len(library.get_benchmark_suite())} circuits")
|
||||
|
||||
# Save QASM examples
|
||||
print("\nSaving QASM examples...")
|
||||
save_example_qasm_files()
|
||||
print("Done!")
|
||||
843
cli.py
Normal file
@@ -0,0 +1,843 @@
#!/usr/bin/env python3
|
||||
"""
|
||||
HDH Command Line Interface
|
||||
==========================
|
||||
|
||||
Interactive command-line interface for the HDH deployment example.
|
||||
Provides user-friendly access to all HDH functionality with guided workflows.
|
||||
|
||||
Author: HDH Deployment Team
|
||||
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!
|
||||
|
||||
Features:
|
||||
- Interactive menu system
|
||||
- Circuit processing workflows
|
||||
- Benchmarking tools
|
||||
- Configuration management
|
||||
- Results visualization
|
||||
- Help and documentation
|
||||
"""
|
||||
|
||||
import os
|
||||
import sys
|
||||
import json
|
||||
import time
|
||||
import click
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Optional, Any
|
||||
from datetime import datetime
|
||||
|
||||
# Rich console for beautiful CLI output
|
||||
from rich.console import Console
|
||||
from rich.panel import Panel
|
||||
from rich.table import Table
|
||||
from rich.progress import Progress, SpinnerColumn, TextColumn
|
||||
from rich.prompt import Prompt, Confirm
|
||||
from rich.syntax import Syntax
|
||||
from rich.tree import Tree
|
||||
from rich.text import Text
|
||||
|
||||
# Add HDH to path
|
||||
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'HDH')))
|
||||
|
||||
# Import our modules
|
||||
from main import HDHDeploymentManager
|
||||
from benchmark import HDHBenchmarkSuite
|
||||
from circuit_examples import HDHCircuitLibrary
|
||||
|
||||
# Console for rich output
|
||||
console = Console()
|
||||
|
||||
|
||||
class HDHCommandLineInterface:
|
||||
"""
|
||||
Interactive command-line interface for HDH deployment.
|
||||
"""
|
||||
|
||||
def __init__(self):
|
||||
"""Initialize the CLI."""
|
||||
self.deployment_manager = None
|
||||
self.benchmark_suite = None
|
||||
self.circuit_library = HDHCircuitLibrary()
|
||||
self.output_dir = "hdh_results"
|
||||
|
||||
# Display welcome message
|
||||
self.show_welcome()
|
||||
|
||||
def show_welcome(self):
|
||||
"""Display welcome message."""
|
||||
welcome_text = """
|
||||
[bold blue]HDH (Hybrid Dependency Hypergraph) CLI[/bold blue]
|
||||
|
||||
Interactive interface for quantum circuit analysis and HDH deployment.
|
||||
|
||||
[italic]Special thanks to Maria Gragera Garces for the HDH library![/italic] 🎉
|
||||
"""
|
||||
|
||||
console.print(Panel(welcome_text.strip(), expand=False, border_style="blue"))
|
||||
|
||||
def show_main_menu(self) -> str:
|
||||
"""Display main menu and get user choice."""
|
||||
console.print("\n[bold green]Main Menu[/bold green]")
|
||||
console.print("─" * 50)
|
||||
|
||||
choices = {
|
||||
"1": "Process Quantum Circuits",
|
||||
"2": "Run Performance Benchmarks",
|
||||
"3": "Circuit Library & Examples",
|
||||
"4": "Configuration & Settings",
|
||||
"5": "View Results & Reports",
|
||||
"6": "Help & Documentation",
|
||||
"q": "Quit"
|
||||
}
|
||||
|
||||
for key, description in choices.items():
|
||||
if key == "q":
|
||||
console.print(f"[red]{key}[/red]. {description}")
|
||||
else:
|
||||
console.print(f"[cyan]{key}[/cyan]. {description}")
|
||||
|
||||
return Prompt.ask("\nSelect an option", choices=list(choices.keys()), default="1")
|
||||
|
||||
def process_circuits_menu(self):
|
||||
"""Handle circuit processing menu."""
|
||||
console.print("\n[bold yellow]Circuit Processing[/bold yellow]")
|
||||
|
||||
# Initialize deployment manager if needed
|
||||
if not self.deployment_manager:
|
||||
output_dir = Prompt.ask("Output directory", default=self.output_dir)
|
||||
log_level = Prompt.ask("Log level", choices=["DEBUG", "INFO", "WARNING", "ERROR"], default="INFO")
|
||||
|
||||
with console.status("[bold green]Initializing HDH deployment manager..."):
|
||||
self.deployment_manager = HDHDeploymentManager(output_dir, log_level)
|
||||
self.output_dir = output_dir
|
||||
|
||||
while True:
|
||||
console.print("\n[bold]Circuit Processing Options:[/bold]")
|
||||
options = {
|
||||
"1": "Process Example Circuits",
|
||||
"2": "Process QASM File",
|
||||
"3": "Run Comprehensive Demo",
|
||||
"4": "Custom Circuit Processing",
|
||||
"b": "Back to Main Menu"
|
||||
}
|
||||
|
||||
for key, desc in options.items():
|
||||
console.print(f" [cyan]{key}[/cyan]. {desc}")
|
||||
|
||||
choice = Prompt.ask("Select option", choices=list(options.keys()), default="1")
|
||||
|
||||
if choice == "b":
|
||||
break
|
||||
elif choice == "1":
|
||||
self.process_example_circuits()
|
||||
elif choice == "2":
|
||||
self.process_qasm_file()
|
||||
elif choice == "3":
|
||||
self.run_comprehensive_demo()
|
||||
elif choice == "4":
|
||||
self.custom_circuit_processing()
|
||||
|
||||
def process_example_circuits(self):
|
||||
"""Process example circuits."""
|
||||
console.print("\n[bold]Example Circuits[/bold]")
|
||||
|
||||
# Get available examples
|
||||
examples = self.circuit_library.get_all_examples()
|
||||
|
||||
# Show available circuits
|
||||
table = Table(title="Available Example Circuits")
|
||||
table.add_column("Name", style="cyan")
|
||||
table.add_column("Qubits", justify="right")
|
||||
table.add_column("Depth", justify="right")
|
||||
table.add_column("Description", style="green")
|
||||
|
||||
circuit_descriptions = {
|
||||
"bell_state": "Entangled Bell state",
|
||||
"ghz_3": "3-qubit GHZ state",
|
||||
"ghz_4": "4-qubit GHZ state",
|
||||
"w_state": "W state superposition",
|
||||
"qft_3": "3-qubit Quantum Fourier Transform",
|
||||
"qft_4": "4-qubit Quantum Fourier Transform",
|
||||
"grover_2": "2-qubit Grover search",
|
||||
"deutsch_jozsa": "Deutsch-Jozsa algorithm",
|
||||
"teleportation": "Quantum teleportation protocol",
|
||||
"error_correction": "3-bit error correction",
|
||||
"vqe_ansatz": "VQE variational ansatz",
|
||||
"qaoa": "QAOA optimization circuit"
|
||||
}
|
||||
|
||||
for name, circuit in examples.items():
|
||||
desc = circuit_descriptions.get(name, "Quantum circuit example")
|
||||
table.add_row(name, str(circuit.num_qubits), str(circuit.depth()), desc)
|
||||
|
||||
console.print(table)
|
||||
|
||||
# Let user select circuits
|
||||
selected = Prompt.ask(
|
||||
"\nSelect circuits to process (comma-separated names or 'all')",
|
||||
default="bell_state,ghz_3,qft_3"
|
||||
)
|
||||
|
||||
if selected.lower() == "all":
|
||||
circuits_to_process = list(examples.items())
|
||||
else:
|
||||
circuit_names = [name.strip() for name in selected.split(",")]
|
||||
circuits_to_process = [(name, examples[name]) for name in circuit_names if name in examples]
|
||||
|
||||
save_plots = Confirm.ask("Save visualization plots?", default=True)
|
||||
|
||||
# Process circuits
|
||||
results = []
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Processing circuits...", total=len(circuits_to_process))
|
||||
|
||||
for name, circuit in circuits_to_process:
|
||||
progress.update(task, description=f"Processing {name}...")
|
||||
result = self.deployment_manager.process_circuit(circuit, save_plots)
|
||||
results.append(result)
|
||||
progress.advance(task)
|
||||
|
||||
# Show results
|
||||
self.show_processing_results(results)
|
||||
|
||||
def process_qasm_file(self):
|
||||
"""Process a QASM file."""
|
||||
console.print("\n[bold]QASM File Processing[/bold]")
|
||||
|
||||
qasm_file = Prompt.ask("Enter QASM file path")
|
||||
|
||||
if not Path(qasm_file).exists():
|
||||
console.print(f"[red]Error: File '{qasm_file}' not found[/red]")
|
||||
return
|
||||
|
||||
save_plots = Confirm.ask("Save visualization plots?", default=True)
|
||||
|
||||
with console.status(f"[bold green]Processing {qasm_file}..."):
|
||||
result = self.deployment_manager.process_qasm_file(qasm_file, save_plots)
|
||||
|
||||
self.show_processing_results([result])
|
||||
|
||||
def run_comprehensive_demo(self):
|
||||
"""Run comprehensive demo."""
|
||||
console.print("\n[bold]Comprehensive Demo[/bold]")
|
||||
console.print("This will process multiple example circuits and generate comprehensive analysis.")
|
||||
|
||||
if not Confirm.ask("Continue with comprehensive demo?", default=True):
|
||||
return
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Running comprehensive demo...", total=None)
|
||||
summary = self.deployment_manager.run_comprehensive_demo()
|
||||
progress.update(task, description="Demo completed!")
|
||||
|
||||
# Show summary
|
||||
console.print("\n[bold green]Demo Summary[/bold green]")
|
||||
console.print(f"Total processed: {summary['total_processed']}")
|
||||
console.print(f"Success rate: {summary['success_rate']:.2%}")
|
||||
console.print(f"Average processing time: {summary['average_processing_time']:.3f}s")
|
||||
console.print(f"Total nodes processed: {summary['total_nodes_processed']}")
|
||||
console.print(f"Duration: {summary['deployment_duration']:.2f}s")
|
||||
|
||||
def custom_circuit_processing(self):
|
||||
"""Custom circuit processing workflow."""
|
||||
console.print("\n[bold]Custom Circuit Processing[/bold]")
|
||||
console.print("Create custom quantum circuits for HDH analysis.")
|
||||
|
||||
circuit_types = {
|
||||
"1": "Random Circuit",
|
||||
"2": "GHZ State (custom size)",
|
||||
"3": "QFT (custom size)",
|
||||
"4": "Bell State Variants"
|
||||
}
|
||||
|
||||
for key, desc in circuit_types.items():
|
||||
console.print(f" [cyan]{key}[/cyan]. {desc}")
|
||||
|
||||
choice = Prompt.ask("Select circuit type", choices=list(circuit_types.keys()))
|
||||
|
||||
if choice == "1":
|
||||
n_qubits = int(Prompt.ask("Number of qubits", default="4"))
|
||||
depth = int(Prompt.ask("Circuit depth", default="8"))
|
||||
seed = int(Prompt.ask("Random seed", default="42"))
|
||||
|
||||
circuit = self.circuit_library.random_circuit(n_qubits, depth, seed)
|
||||
circuit.name = f"Custom Random {n_qubits}q {depth}d"
|
||||
|
||||
elif choice == "2":
|
||||
n_qubits = int(Prompt.ask("Number of qubits", default="5"))
|
||||
circuit = self.circuit_library.ghz_state(n_qubits)
|
||||
|
||||
elif choice == "3":
|
||||
n_qubits = int(Prompt.ask("Number of qubits", default="4"))
|
||||
circuit = self.circuit_library.qft_circuit(n_qubits)
|
||||
|
||||
elif choice == "4":
|
||||
variants = self.circuit_library.bell_state_variants()
|
||||
console.print(f"Processing all {len(variants)} Bell state variants...")
|
||||
|
||||
results = []
|
||||
for variant in variants:
|
||||
result = self.deployment_manager.process_circuit(variant, True)
|
||||
results.append(result)
|
||||
|
||||
self.show_processing_results(results)
|
||||
return
|
||||
|
||||
save_plots = Confirm.ask("Save visualization plots?", default=True)
|
||||
|
||||
with console.status(f"Processing {circuit.name}..."):
|
||||
result = self.deployment_manager.process_circuit(circuit, save_plots)
|
||||
|
||||
self.show_processing_results([result])
|
||||
|
||||
def benchmarking_menu(self):
|
||||
"""Handle benchmarking menu."""
|
||||
console.print("\n[bold yellow]Performance Benchmarking[/bold yellow]")
|
||||
|
||||
if not self.benchmark_suite:
|
||||
output_dir = Prompt.ask("Benchmark output directory", default="benchmark_results")
|
||||
repetitions = int(Prompt.ask("Number of repetitions per test", default="3"))
|
||||
|
||||
with console.status("[bold green]Initializing benchmark suite..."):
|
||||
self.benchmark_suite = HDHBenchmarkSuite(output_dir, repetitions)
|
||||
|
||||
while True:
|
||||
console.print("\n[bold]Benchmarking Options:[/bold]")
|
||||
options = {
|
||||
"1": "Scalability Benchmark",
|
||||
"2": "Algorithm Benchmark",
|
||||
"3": "Random Circuit Benchmark",
|
||||
"4": "Comprehensive Benchmark",
|
||||
"5": "View Benchmark Results",
|
||||
"b": "Back to Main Menu"
|
||||
}
|
||||
|
||||
for key, desc in options.items():
|
||||
console.print(f" [cyan]{key}[/cyan]. {desc}")
|
||||
|
||||
choice = Prompt.ask("Select option", choices=list(options.keys()), default="4")
|
||||
|
||||
if choice == "b":
|
||||
break
|
||||
elif choice == "1":
|
||||
self.run_scalability_benchmark()
|
||||
elif choice == "2":
|
||||
self.run_algorithm_benchmark()
|
||||
elif choice == "3":
|
||||
self.run_random_benchmark()
|
||||
elif choice == "4":
|
||||
self.run_comprehensive_benchmark()
|
||||
elif choice == "5":
|
||||
self.view_benchmark_results()
|
||||
|
||||
def run_scalability_benchmark(self):
|
||||
"""Run scalability benchmark."""
|
||||
console.print("\n[bold]Scalability Benchmark[/bold]")
|
||||
console.print("Testing HDH performance across different circuit sizes...")
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Running scalability benchmark...", total=None)
|
||||
results = self.benchmark_suite.run_scalability_benchmark()
|
||||
progress.update(task, description="Scalability benchmark completed!")
|
||||
|
||||
self.show_benchmark_results(results, "Scalability")
|
||||
|
||||
def run_algorithm_benchmark(self):
|
||||
"""Run algorithm benchmark."""
|
||||
console.print("\n[bold]Algorithm Benchmark[/bold]")
|
||||
console.print("Testing HDH performance on quantum algorithms...")
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Running algorithm benchmark...", total=None)
|
||||
results = self.benchmark_suite.run_algorithm_benchmark()
|
||||
progress.update(task, description="Algorithm benchmark completed!")
|
||||
|
||||
self.show_benchmark_results(results, "Algorithm")
|
||||
|
||||
def run_random_benchmark(self):
|
||||
"""Run random circuit benchmark."""
|
||||
console.print("\n[bold]Random Circuit Benchmark[/bold]")
|
||||
|
||||
max_qubits = int(Prompt.ask("Maximum qubits", default="6"))
|
||||
max_depth = int(Prompt.ask("Maximum depth", default="20"))
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Running random circuit benchmark...", total=None)
|
||||
results = self.benchmark_suite.run_random_circuit_benchmark(max_qubits, max_depth)
|
||||
progress.update(task, description="Random circuit benchmark completed!")
|
||||
|
||||
self.show_benchmark_results(results, "Random Circuit")
|
||||
|
||||
def run_comprehensive_benchmark(self):
|
||||
"""Run comprehensive benchmark."""
|
||||
console.print("\n[bold]Comprehensive Benchmark[/bold]")
|
||||
console.print("Running all benchmark suites and generating analysis...")
|
||||
|
||||
if not Confirm.ask("This may take several minutes. Continue?", default=True):
|
||||
return
|
||||
|
||||
with Progress(
|
||||
SpinnerColumn(),
|
||||
TextColumn("[progress.description]{task.description}"),
|
||||
console=console
|
||||
) as progress:
|
||||
task = progress.add_task("Running comprehensive benchmark...", total=None)
|
||||
report = self.benchmark_suite.run_full_benchmark()
|
||||
progress.update(task, description="Comprehensive benchmark completed!")
|
||||
|
||||
# Show comprehensive report
|
||||
console.print("\n[bold green]Comprehensive Benchmark Report[/bold green]")
|
||||
|
||||
if "error" not in report:
|
||||
summary = report["benchmark_summary"]
|
||||
perf_stats = report["performance_statistics"]
|
||||
|
||||
table = Table(title="Benchmark Summary")
|
||||
table.add_column("Metric", style="cyan")
|
||||
table.add_column("Value", style="green")
|
||||
|
||||
table.add_row("Total Circuits", str(summary['total_circuits']))
|
||||
table.add_row("Success Rate", "100%")
|
||||
table.add_row("Avg Conversion Time", f"{perf_stats['conversion_time']['mean']:.4f}s")
|
||||
table.add_row("Avg Memory Usage", f"{perf_stats['memory_usage']['mean']:.2f}MB")
|
||||
table.add_row("Largest Circuit", f"{report['scalability_analysis']['largest_circuit_qubits']} qubits")
|
||||
table.add_row("Total Time", f"{summary.get('total_benchmark_time', 0):.2f}s")
|
||||
|
||||
console.print(table)
|
||||
console.print(f"\n📁 Results saved in: {self.benchmark_suite.output_dir}")
|
||||
else:
|
||||
console.print(f"[red]Benchmark failed: {report['error']}[/red]")
|
||||
|
||||
def circuit_library_menu(self):
|
||||
"""Handle circuit library menu."""
|
||||
console.print("\n[bold yellow]Circuit Library & Examples[/bold yellow]")
|
||||
|
||||
while True:
|
||||
console.print("\n[bold]Circuit Library Options:[/bold]")
|
||||
options = {
|
||||
"1": "View Available Circuits",
|
||||
"2": "Generate QASM Examples",
|
||||
"3": "Create Custom Circuit",
|
||||
"4": "Circuit Information",
|
||||
"b": "Back to Main Menu"
|
||||
}
|
||||
|
||||
for key, desc in options.items():
|
||||
console.print(f" [cyan]{key}[/cyan]. {desc}")
|
||||
|
||||
choice = Prompt.ask("Select option", choices=list(options.keys()), default="1")
|
||||
|
||||
if choice == "b":
|
||||
break
|
||||
elif choice == "1":
|
||||
self.view_available_circuits()
|
||||
elif choice == "2":
|
||||
self.generate_qasm_examples()
|
||||
elif choice == "3":
|
||||
self.create_custom_circuit()
|
||||
elif choice == "4":
|
||||
self.show_circuit_information()
|
||||
|
||||
def view_available_circuits(self):
|
||||
"""View available circuits in the library."""
|
||||
console.print("\n[bold]Available Quantum Circuits[/bold]")
|
||||
|
||||
examples = self.circuit_library.get_all_examples()
|
||||
|
||||
# Create tree structure
|
||||
tree = Tree("HDH Circuit Library")
|
||||
|
||||
# Group circuits by category
|
||||
categories = {
|
||||
"Basic States": ["bell_state", "ghz_3", "ghz_4", "w_state"],
|
||||
"Algorithms": ["qft_3", "qft_4", "grover_2", "deutsch_jozsa"],
|
||||
"Protocols": ["teleportation", "error_correction"],
|
||||
"Variational": ["vqe_ansatz", "qaoa"],
|
||||
"Random": ["random_small", "random_medium"]
|
||||
}
|
||||
|
||||
for category, circuit_names in categories.items():
|
||||
category_branch = tree.add(f"[bold blue]{category}[/bold blue]")
|
||||
for name in circuit_names:
|
||||
if name in examples:
|
||||
circuit = examples[name]
|
||||
info = f"{name} - {circuit.num_qubits}q, depth {circuit.depth()}"
|
||||
category_branch.add(f"[green]{info}[/green]")
|
||||
|
||||
console.print(tree)
|
||||
|
||||
# Show benchmark suite info
|
||||
benchmark_circuits = self.circuit_library.get_benchmark_suite()
|
||||
console.print(f"\n[bold]Benchmark Suite:[/bold] {len(benchmark_circuits)} circuits")
|
||||
|
||||
def generate_qasm_examples(self):
|
||||
"""Generate QASM example files."""
|
||||
console.print("\n[bold]Generate QASM Examples[/bold]")
|
||||
|
||||
output_dir = Prompt.ask("Output directory for QASM files", default="qasm_examples")
|
||||
|
||||
with console.status(f"Generating QASM examples in {output_dir}..."):
|
||||
from circuit_examples import save_example_qasm_files
|
||||
save_example_qasm_files(output_dir)
|
||||
|
||||
console.print(f"[green]QASM examples generated in {output_dir}/[/green]")
|
||||
|
||||
def show_processing_results(self, results: List[Dict[str, Any]]):
|
||||
"""Display circuit processing results."""
|
||||
console.print("\n[bold green]Processing Results[/bold green]")
|
||||
|
||||
table = Table(title="Circuit Processing Summary")
|
||||
table.add_column("Circuit", style="cyan")
|
||||
table.add_column("Status", style="green")
|
||||
table.add_column("Nodes", justify="right")
|
||||
table.add_column("Edges", justify="right")
|
||||
table.add_column("Time (s)", justify="right")
|
||||
|
||||
for result in results:
|
||||
status = "✅ Success" if result['success'] else "❌ Failed"
|
||||
if result['success']:
|
||||
table.add_row(
|
||||
result['circuit_name'],
|
||||
status,
|
||||
str(result['hdh_stats']['nodes']),
|
||||
str(result['hdh_stats']['edges']),
|
||||
f"{result['processing_time']:.3f}"
|
||||
)
|
||||
else:
|
||||
table.add_row(
|
||||
result['circuit_name'],
|
||||
status,
|
||||
"-", "-",
|
||||
f"{result['processing_time']:.3f}"
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
# Show output directory
|
||||
console.print(f"\n📁 Results saved in: {self.output_dir}")
|
||||
|
||||
def show_benchmark_results(self, results: List, benchmark_type: str):
|
||||
"""Display benchmark results."""
|
||||
console.print(f"\n[bold green]{benchmark_type} Benchmark Results[/bold green]")
|
||||
|
||||
if not results:
|
||||
console.print("[red]No results to display[/red]")
|
||||
return
|
||||
|
||||
table = Table(title=f"{benchmark_type} Performance")
|
||||
table.add_column("Circuit", style="cyan")
|
||||
table.add_column("Qubits", justify="right")
|
||||
table.add_column("Conversion (s)", justify="right")
|
||||
table.add_column("Memory (MB)", justify="right")
|
||||
table.add_column("HDH Nodes", justify="right")
|
||||
|
||||
for result in results:
|
||||
if result.success:
|
||||
table.add_row(
|
||||
result.circuit_name,
|
||||
str(result.num_qubits),
|
||||
f"{result.conversion_time:.4f}",
|
||||
f"{result.memory_peak_mb:.2f}",
|
||||
str(result.hdh_nodes)
|
||||
)
|
||||
|
||||
console.print(table)
|
||||
|
||||
def configuration_menu(self):
|
||||
"""Handle configuration menu."""
|
||||
console.print("\n[bold yellow]Configuration & Settings[/bold yellow]")
|
||||
|
||||
console.print("Current settings:")
|
||||
console.print(f" Output directory: [cyan]{self.output_dir}[/cyan]")
|
||||
|
||||
if self.deployment_manager:
|
||||
console.print(" Deployment manager: [green]Initialized[/green]")
|
||||
else:
|
||||
console.print(" Deployment manager: [red]Not initialized[/red]")
|
||||
|
||||
if self.benchmark_suite:
|
||||
console.print(" Benchmark suite: [green]Initialized[/green]")
|
||||
else:
|
||||
console.print(" Benchmark suite: [red]Not initialized[/red]")
|
||||
|
||||
# Configuration options
|
||||
if Confirm.ask("\nUpdate output directory?", default=False):
|
||||
self.output_dir = Prompt.ask("New output directory", default=self.output_dir)
|
||||
# Reset managers to pick up new directory
|
||||
self.deployment_manager = None
|
||||
self.benchmark_suite = None
|
||||
|
||||
def view_results_menu(self):
|
||||
"""Handle results viewing menu."""
|
||||
console.print("\n[bold yellow]View Results & Reports[/bold yellow]")
|
||||
|
||||
# Look for result files
|
||||
result_dirs = [
|
||||
Path(self.output_dir),
|
||||
Path("hdh_results"),
|
||||
Path("benchmark_results")
|
||||
]
|
||||
|
||||
found_results = []
|
||||
for result_dir in result_dirs:
|
||||
if result_dir.exists():
|
||||
json_files = list(result_dir.glob("*.json"))
|
||||
png_files = list(result_dir.glob("*.png"))
|
||||
found_results.append((result_dir, json_files, png_files))
|
||||
|
||||
if not found_results:
|
||||
console.print("[red]No result files found[/red]")
|
||||
return
|
||||
|
||||
# Display found results
|
||||
for result_dir, json_files, png_files in found_results:
|
||||
console.print(f"\n[bold]Results in {result_dir}:[/bold]")
|
||||
|
||||
if json_files:
|
||||
console.print(" [cyan]JSON Reports:[/cyan]")
|
||||
for json_file in json_files:
|
||||
console.print(f" • {json_file.name}")
|
||||
|
||||
if png_files:
|
||||
console.print(" [cyan]Visualizations:[/cyan]")
|
||||
for png_file in png_files:
|
||||
console.print(f" • {png_file.name}")
|
||||
|
||||
# Option to view specific files
|
||||
if Confirm.ask("\nView a specific JSON report?", default=False):
|
||||
all_json = []
|
||||
for _, json_files, _ in found_results:
|
||||
all_json.extend(json_files)
|
||||
|
||||
if all_json:
|
||||
file_choices = {str(i+1): f.name for i, f in enumerate(all_json)}
|
||||
file_choices["b"] = "Back"
|
||||
|
||||
console.print("\nAvailable JSON reports:")
|
||||
for key, filename in file_choices.items():
|
||||
console.print(f" [cyan]{key}[/cyan]. {filename}")
|
||||
|
||||
choice = Prompt.ask("Select file", choices=list(file_choices.keys()))
|
||||
|
||||
if choice != "b":
|
||||
selected_file = all_json[int(choice) - 1]
|
||||
self.view_json_report(selected_file)
|
||||
|
||||
def view_json_report(self, json_file: Path):
|
||||
"""View a JSON report file."""
|
||||
try:
|
||||
with open(json_file, 'r') as f:
|
||||
data = json.load(f)
|
||||
|
||||
# Pretty print JSON with syntax highlighting
|
||||
json_str = json.dumps(data, indent=2)
|
||||
syntax = Syntax(json_str, "json", theme="monokai", line_numbers=True)
|
||||
|
||||
console.print(f"\n[bold]Contents of {json_file.name}:[/bold]")
|
||||
console.print(syntax)
|
||||
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error reading {json_file}: {str(e)}[/red]")
|
||||
|
||||
def help_menu(self):
|
||||
"""Display help and documentation."""
|
||||
console.print("\n[bold yellow]Help & Documentation[/bold yellow]")
|
||||
|
||||
help_text = """
|
||||
[bold blue]HDH CLI Help[/bold blue]
|
||||
|
||||
[bold]About HDH:[/bold]
|
||||
HDH (Hybrid Dependency Hypergraph) is a library for representing and analyzing
|
||||
quantum circuits using hypergraph structures. It enables:
|
||||
• Circuit conversion from multiple quantum frameworks
|
||||
• Dependency analysis and visualization
|
||||
• Circuit partitioning and optimization
|
||||
• Performance benchmarking
|
||||
|
||||
[bold]Main Features:[/bold]
|
||||
1. [cyan]Circuit Processing[/cyan] - Convert and analyze quantum circuits
|
||||
2. [cyan]Benchmarking[/cyan] - Measure HDH performance across different circuits
|
||||
3. [cyan]Circuit Library[/cyan] - Access pre-built quantum circuit examples
|
||||
4. [cyan]Configuration[/cyan] - Customize settings and output directories
|
||||
5. [cyan]Results Viewing[/cyan] - Examine processing results and reports
|
||||
|
||||
[bold]Supported Circuit Types:[/bold]
|
||||
• Qiskit QuantumCircuit objects
|
||||
• OpenQASM 2.0 files
|
||||
• Pre-built examples (Bell states, GHZ, QFT, Grover, etc.)
|
||||
|
||||
[bold]Output Files:[/bold]
|
||||
• JSON reports with detailed metrics
|
||||
• PNG visualizations of HDH structures
|
||||
• Performance benchmark plots
|
||||
• Processing logs
|
||||
|
||||
[bold]Tips:[/bold]
|
||||
• Start with small circuits (< 8 qubits) for faster processing
|
||||
• Use the comprehensive demo for a full overview
|
||||
• Enable plot saving to visualize HDH structures
|
||||
• Run benchmarks to understand performance characteristics
|
||||
|
||||
[italic]Special thanks to Maria Gragera Garces for the HDH library![/italic]
|
||||
"""
|
||||
|
||||
console.print(Panel(help_text.strip(), expand=False, border_style="blue"))
|
||||
|
||||
if Confirm.ask("\nWould you like to see example usage?", default=False):
|
||||
self.show_example_usage()
|
||||
|
||||
def show_example_usage(self):
|
||||
"""Show example usage scenarios."""
|
||||
console.print("\n[bold]Example Usage Scenarios[/bold]")
|
||||
|
||||
examples = [
|
||||
{
|
||||
"title": "Quick Start - Process Bell State",
|
||||
"steps": [
|
||||
"1. Select 'Process Quantum Circuits'",
|
||||
"2. Choose 'Process Example Circuits'",
|
||||
"3. Enter 'bell_state' when prompted",
|
||||
"4. Confirm to save plots",
|
||||
"5. View results in output directory"
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Benchmark Suite",
|
||||
"steps": [
|
||||
"1. Select 'Run Performance Benchmarks'",
|
||||
"2. Choose 'Comprehensive Benchmark'",
|
||||
"3. Confirm to run (may take several minutes)",
|
||||
"4. View generated performance plots",
|
||||
"5. Check benchmark_report.json for detailed metrics"
|
||||
]
|
||||
},
|
||||
{
|
||||
"title": "Custom QASM Processing",
|
||||
"steps": [
|
||||
"1. Select 'Process Quantum Circuits'",
|
||||
"2. Choose 'Process QASM File'",
|
||||
"3. Enter path to your .qasm file",
|
||||
"4. Confirm visualization settings",
|
||||
"5. Review HDH conversion results"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
for example in examples:
|
||||
console.print(f"\n[bold green]{example['title']}:[/bold green]")
|
||||
for step in example['steps']:
|
||||
console.print(f" {step}")
|
||||
|
||||
def create_custom_circuit(self):
|
||||
"""Guide for creating custom circuits."""
|
||||
console.print("\n[bold]Custom Circuit Creation[/bold]")
|
||||
console.print("This feature allows you to create custom quantum circuits programmatically.")
|
||||
console.print("For now, use the 'Custom Circuit Processing' option in the main processing menu.")
|
||||
|
||||
def show_circuit_information(self):
|
||||
"""Show detailed information about circuits."""
|
||||
console.print("\n[bold]Circuit Information[/bold]")
|
||||
|
||||
examples = self.circuit_library.get_all_examples()
|
||||
circuit_name = Prompt.ask(
|
||||
"Enter circuit name for details",
|
||||
choices=list(examples.keys()),
|
||||
default="bell_state"
|
||||
)
|
||||
|
||||
if circuit_name in examples:
|
||||
circuit = examples[circuit_name]
|
||||
|
||||
info_table = Table(title=f"Circuit Information: {circuit_name}")
|
||||
info_table.add_column("Property", style="cyan")
|
||||
info_table.add_column("Value", style="green")
|
||||
|
||||
info_table.add_row("Name", getattr(circuit, 'name', circuit_name))
|
||||
info_table.add_row("Qubits", str(circuit.num_qubits))
|
||||
info_table.add_row("Classical Bits", str(circuit.num_clbits))
|
||||
info_table.add_row("Depth", str(circuit.depth()))
|
||||
info_table.add_row("Size (gates)", str(circuit.size()))
|
||||
info_table.add_row("Operations", str(len(circuit.data)))
|
||||
|
||||
console.print(info_table)
|
||||
|
||||
# Show QASM representation
|
||||
if Confirm.ask("Show QASM representation?", default=False):
|
||||
try:
|
||||
qasm_str = circuit.qasm()
|
||||
syntax = Syntax(qasm_str, "qasm", theme="monokai", line_numbers=True)
|
||||
console.print(f"\n[bold]QASM for {circuit_name}:[/bold]")
|
||||
console.print(syntax)
|
||||
except Exception as e:
|
||||
console.print(f"[red]Error generating QASM: {str(e)}[/red]")
|
||||
|
||||
def run(self):
|
||||
"""Main CLI loop."""
|
||||
try:
|
||||
while True:
|
||||
choice = self.show_main_menu()
|
||||
|
||||
if choice == "q":
|
||||
console.print("\n[bold blue]Thank you for using HDH CLI![/bold blue]")
|
||||
console.print("[italic]Special thanks to Maria Gragera Garces! 🎉[/italic]")
|
||||
break
|
||||
elif choice == "1":
|
||||
self.process_circuits_menu()
|
||||
elif choice == "2":
|
||||
self.benchmarking_menu()
|
||||
elif choice == "3":
|
||||
self.circuit_library_menu()
|
||||
elif choice == "4":
|
||||
self.configuration_menu()
|
||||
elif choice == "5":
|
||||
self.view_results_menu()
|
||||
elif choice == "6":
|
||||
self.help_menu()
|
||||
|
||||
except KeyboardInterrupt:
|
||||
console.print("\n[yellow]Goodbye![/yellow]")
|
||||
except Exception as e:
|
||||
console.print(f"\n[red]An error occurred: {str(e)}[/red]")
|
||||
|
||||
|
||||
@click.command()
|
||||
@click.option('--output-dir', default='hdh_results', help='Default output directory')
|
||||
@click.option('--theme', default='default', help='Console theme')
|
||||
def main(output_dir, theme):
|
||||
"""
|
||||
HDH Command Line Interface
|
||||
|
||||
Interactive CLI for HDH (Hybrid Dependency Hypergraph) deployment and analysis.
|
||||
"""
|
||||
try:
|
||||
cli = HDHCommandLineInterface()
|
||||
cli.output_dir = output_dir
|
||||
cli.run()
|
||||
except Exception as e:
|
||||
console.print(f"[red]Failed to start CLI: {str(e)}[/red]")
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
66
config.yaml
Normal file
@@ -0,0 +1,66 @@
# HDH Deployment Configuration
|
||||
# Configuration file for the HDH deployment example
|
||||
|
||||
# Logging configuration
|
||||
logging:
|
||||
level: INFO
|
||||
format: "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
|
||||
file: "hdh_deployment.log"
|
||||
|
||||
# Output settings
|
||||
output:
|
||||
directory: "hdh_results"
|
||||
save_plots: true
|
||||
plot_format: "png"
|
||||
plot_dpi: 300
|
||||
|
||||
# Circuit processing settings
|
||||
circuits:
|
||||
max_qubits: 10
|
||||
default_partitions: 3
|
||||
enable_visualization: true
|
||||
save_intermediate: false
|
||||
|
||||
# Performance settings
|
||||
performance:
|
||||
timeout_seconds: 300
|
||||
max_memory_gb: 8
|
||||
parallel_processing: false
|
||||
|
||||
# QASM processing
|
||||
qasm:
|
||||
supported_versions: ["2.0"]
|
||||
max_file_size_mb: 10
|
||||
validate_syntax: true
|
||||
|
||||
# Benchmarking settings
|
||||
benchmark:
|
||||
repetitions: 3
|
||||
warmup_runs: 1
|
||||
measure_memory: true
|
||||
save_detailed_metrics: true
|
||||
|
||||
# Visualization settings
|
||||
visualization:
|
||||
figsize: [12, 8]
|
||||
node_colors:
|
||||
quantum: "#FF6B6B"
|
||||
classical: "#4DABF7"
|
||||
edge_colors:
|
||||
quantum: "#FF8E53"
|
||||
classical: "#69DB7C"
|
||||
layout: "spring"
|
||||
node_size: 100
|
||||
edge_width: 1.5
|
||||
|
||||
# Docker settings (for containerized deployment)
|
||||
docker:
|
||||
base_image: "python:3.11-slim"
|
||||
working_dir: "/app"
|
||||
expose_port: 8080
|
||||
|
||||
# Development settings
|
||||
development:
|
||||
debug_mode: false
|
||||
verbose_logging: false
|
||||
save_debug_info: false
|
||||
192
docker-compose.yml
Normal file
@@ -0,0 +1,192 @@
version: '3.8'
|
||||
|
||||
# HDH Deployment Docker Compose
|
||||
# Comprehensive deployment setup for HDH library examples
|
||||
# Special thanks to Maria Gragera Garces for the HDH library!
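#
# Typical usage (a sketch; commands assume Docker Compose v2):
#   docker compose up hdh-deployment
#   docker compose --profile benchmark up hdh-benchmark
#   docker compose --profile jupyter up hdh-jupyter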
|
||||
|
||||
services:
|
||||
# Main HDH deployment service
|
||||
hdh-deployment:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile
|
||||
args:
|
||||
BUILD_DATE: ${BUILD_DATE:-$(date -u +'%Y-%m-%dT%H:%M:%SZ')}
|
||||
VERSION: ${VERSION:-1.0.0}
|
||||
container_name: hdh-deployment
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- HDH_OUTPUT_DIR=/app/hdh_results
|
||||
- HDH_LOG_LEVEL=${HDH_LOG_LEVEL:-INFO}
|
||||
- PYTHONPATH=/app
|
||||
- MPLBACKEND=Agg
|
||||
volumes:
|
||||
- hdh_results:/app/hdh_results
|
||||
- benchmark_results:/app/benchmark_results
|
||||
- logs:/app/logs
|
||||
- ./config.yaml:/app/config.yaml:ro
|
||||
# Mount additional QASM files if available
|
||||
- ${QASM_DIR:-./qasm_examples}:/app/qasm_examples:ro
|
||||
    command: ["python", "main.py", "--demo-mode", "--output-dir", "/app/hdh_results"]
    networks:
      - hdh-network
    labels:
      - "hdh.service=deployment"
      - "hdh.description=Main HDH deployment service"
      - "hdh.credits=Thanks to Maria Gragera Garces"

  # Benchmarking service
  hdh-benchmark:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: hdh-benchmark
    restart: "no"  # Run once
    environment:
      - HDH_OUTPUT_DIR=/app/benchmark_results
      - HDH_LOG_LEVEL=${HDH_LOG_LEVEL:-INFO}
      - PYTHONPATH=/app
      - MPLBACKEND=Agg
    volumes:
      - benchmark_results:/app/benchmark_results
      - logs:/app/logs
      - ./config.yaml:/app/config.yaml:ro
    command: [
      "python", "benchmark.py",
      "--suite", "all",
      "--repetitions", "${BENCHMARK_REPETITIONS:-3}",
      "--output-dir", "/app/benchmark_results"
    ]
    networks:
      - hdh-network
    labels:
      - "hdh.service=benchmark"
      - "hdh.description=HDH performance benchmarking"
    profiles:
      - benchmark

  # Circuit examples generator
  hdh-examples:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: hdh-examples
    restart: "no"  # Run once
    environment:
      - PYTHONPATH=/app
    volumes:
      - qasm_examples:/app/qasm_examples
      - logs:/app/logs
    command: ["python", "circuit_examples.py"]
    networks:
      - hdh-network
    labels:
      - "hdh.service=examples"
      - "hdh.description=Generate quantum circuit examples"
    profiles:
      - examples

  # Jupyter notebook service (optional)
  hdh-jupyter:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: hdh-jupyter
    restart: unless-stopped
    environment:
      - JUPYTER_ENABLE_LAB=yes
      - JUPYTER_TOKEN=${JUPYTER_TOKEN:-hdh-token}
      - PYTHONPATH=/app
    volumes:
      - hdh_results:/app/hdh_results
      - benchmark_results:/app/benchmark_results
      - ./notebooks:/app/notebooks
      - ./config.yaml:/app/config.yaml:ro
    ports:
      - "${JUPYTER_PORT:-8888}:8888"
    command: [
      "jupyter", "lab",
      "--ip=0.0.0.0",
      "--port=8888",
      "--no-browser",
      "--allow-root",
      "--notebook-dir=/app"
    ]
    networks:
      - hdh-network
    labels:
      - "hdh.service=jupyter"
      - "hdh.description=Jupyter Lab for interactive HDH analysis"
    profiles:
      - jupyter

  # Web dashboard (placeholder for future development)
  hdh-dashboard:
    build:
      context: .
      dockerfile: Dockerfile
    container_name: hdh-dashboard
    restart: unless-stopped
    environment:
      - FLASK_APP=dashboard.py
      - FLASK_ENV=${FLASK_ENV:-production}
      - HDH_RESULTS_DIR=/app/hdh_results
    volumes:
      - hdh_results:/app/hdh_results:ro
      - benchmark_results:/app/benchmark_results:ro
    ports:
      - "${DASHBOARD_PORT:-8080}:8080"
    command: ["python", "-c", "print('Dashboard service placeholder - to be implemented')"]
    networks:
      - hdh-network
    labels:
      - "hdh.service=dashboard"
      - "hdh.description=Web dashboard for HDH results"
    profiles:
      - dashboard

# Named volumes for persistent storage
volumes:
  hdh_results:
    driver: local
    labels:
      - "hdh.volume=results"
      - "hdh.description=HDH processing results"

  benchmark_results:
    driver: local
    labels:
      - "hdh.volume=benchmarks"
      - "hdh.description=Performance benchmark results"

  qasm_examples:
    driver: local
    labels:
      - "hdh.volume=examples"
      - "hdh.description=Generated QASM examples"

  logs:
    driver: local
    labels:
      - "hdh.volume=logs"
      - "hdh.description=Application logs"

# Custom network
networks:
  hdh-network:
    driver: bridge
    labels:
      - "hdh.network=main"
      - "hdh.description=HDH deployment network"

# Environment variables with defaults
# Create a .env file to override these values:
#
# HDH_LOG_LEVEL=DEBUG
# BENCHMARK_REPETITIONS=5
# JUPYTER_TOKEN=your-secure-token
# JUPYTER_PORT=8888
# DASHBOARD_PORT=8080
# QASM_DIR=./qasm_files
# BUILD_DATE=2024-01-01T00:00:00Z
# VERSION=1.0.0
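The commented defaults above are read by docker-compose from a .env file. For scripts that want the same overrides outside of Compose, a minimal sketch using python-dotenv (already listed in requirements.txt; variable names taken from .env.example, the snippet itself is illustrative and not part of the repository) could look like this:

# Illustrative sketch: read .env overrides and fall back to the documented defaults.
# Assumes python-dotenv is installed; variable names mirror .env.example.
import os
from dotenv import load_dotenv

load_dotenv()  # loads a .env file from the current working directory, if present

HDH_LOG_LEVEL = os.getenv("HDH_LOG_LEVEL", "INFO")
BENCHMARK_REPETITIONS = int(os.getenv("BENCHMARK_REPETITIONS", "3"))
JUPYTER_PORT = int(os.getenv("JUPYTER_PORT", "8888"))
DASHBOARD_PORT = int(os.getenv("DASHBOARD_PORT", "8080"))

print(f"log level={HDH_LOG_LEVEL}, repetitions={BENCHMARK_REPETITIONS}")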
459
main.py
Normal file
459
main.py
Normal file
@@ -0,0 +1,459 @@
#!/usr/bin/env python3
"""
HDH Deployment Example - Comprehensive Demo
==========================================

This example demonstrates real-world deployment of the HDH (Hybrid Dependency Hypergraph)
library for quantum computation analysis and visualization.

Author: Deployment Example
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!

Features demonstrated:
- Quantum circuit conversion to HDH format
- QASM file processing
- Circuit analysis and metrics
- Visualization generation
- Partitioning and optimization
- Error handling and logging
- Performance monitoring
"""

import os
import sys
import time
import logging
import argparse
import json
from pathlib import Path
from typing import Dict, List, Tuple, Optional, Any
from datetime import datetime

# Add HDH to path
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'HDH')))

# HDH imports
from hdh import HDH, plot_hdh
from hdh.converters.qiskit import from_qiskit
from hdh.converters.qasm import from_qasm
from hdh.passes.cut import compute_cut, cost, partition_sizes, compute_parallelism_by_time

# Quantum computing imports
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit.circuit.library import QFT, GroverOperator
import matplotlib.pyplot as plt

class HDHDeploymentManager:
    """
    Main deployment manager for HDH operations.
    Handles circuit processing, analysis, and reporting.
    """

    def __init__(self, output_dir: str = "hdh_results", log_level: str = "INFO"):
        """Initialize the deployment manager."""
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(exist_ok=True)

        # Setup logging
        self.setup_logging(log_level)
        self.logger = logging.getLogger(__name__)

        # Performance tracking
        self.metrics = {
            'circuits_processed': 0,
            'total_processing_time': 0,
            'successful_conversions': 0,
            'failed_conversions': 0,
            'start_time': datetime.now()
        }

        self.logger.info("HDH Deployment Manager initialized")
        self.logger.info(f"Output directory: {self.output_dir}")

    def setup_logging(self, log_level: str):
        """Configure logging system."""
        log_file = self.output_dir / "hdh_deployment.log"

        logging.basicConfig(
            level=getattr(logging, log_level.upper()),
            format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
            handlers=[
                logging.FileHandler(log_file),
                logging.StreamHandler(sys.stdout)
            ]
        )

    def create_bell_state(self) -> QuantumCircuit:
        """Create a Bell state quantum circuit."""
        qc = QuantumCircuit(2, 2)
        qc.h(0)
        qc.cx(0, 1)
        qc.measure_all()
        qc.name = "Bell State"
        return qc

    def create_ghz_state(self, n_qubits: int = 3) -> QuantumCircuit:
        """Create a GHZ state quantum circuit."""
        qc = QuantumCircuit(n_qubits, n_qubits)
        qc.h(0)
        for i in range(1, n_qubits):
            qc.cx(0, i)
        qc.measure_all()
        qc.name = f"GHZ-{n_qubits}"
        return qc

    def create_qft_circuit(self, n_qubits: int = 3) -> QuantumCircuit:
        """Create a Quantum Fourier Transform circuit."""
        qc = QuantumCircuit(n_qubits, n_qubits)
        qft = QFT(n_qubits)
        qc.compose(qft, inplace=True)
        qc.measure_all()
        qc.name = f"QFT-{n_qubits}"
        return qc

    def create_grover_circuit(self, n_qubits: int = 2) -> QuantumCircuit:
        """Create a simplified Grover's algorithm circuit."""
        qc = QuantumCircuit(n_qubits, n_qubits)

        # Initialize superposition
        for i in range(n_qubits):
            qc.h(i)

        # Oracle (mark |11⟩ for 2 qubits)
        if n_qubits == 2:
            qc.cz(0, 1)

        # Diffusion operator
        for i in range(n_qubits):
            qc.h(i)
            qc.x(i)

        if n_qubits == 2:
            qc.cz(0, 1)

        for i in range(n_qubits):
            qc.x(i)
            qc.h(i)

        qc.measure_all()
        qc.name = f"Grover-{n_qubits}"
        return qc

    def process_circuit(self, qc: QuantumCircuit, save_plots: bool = True) -> Dict[str, Any]:
        """
        Process a quantum circuit through HDH conversion and analysis.

        Args:
            qc: Quantum circuit to process
            save_plots: Whether to save visualization plots

        Returns:
            Dictionary containing analysis results
        """
        start_time = time.time()
        circuit_name = getattr(qc, 'name', f'Circuit_{self.metrics["circuits_processed"]}')

        try:
            self.logger.info(f"Processing circuit: {circuit_name}")

            # Convert to HDH
            hdh = from_qiskit(qc)
            self.logger.info(f"Successfully converted {circuit_name} to HDH")

            # Basic analysis
            num_nodes = len(hdh.S)
            num_edges = len(hdh.C)
            num_timesteps = len(hdh.T)

            self.logger.info(f"HDH Stats - Nodes: {num_nodes}, Edges: {num_edges}, Timesteps: {num_timesteps}")

            # Partitioning analysis
            partition_results = {}
            if num_nodes > 1:  # Only partition if we have multiple nodes
                try:
                    num_parts = min(3, max(2, num_nodes // 2))  # Adaptive partitioning
                    partitions = compute_cut(hdh, num_parts)

                    partition_results = {
                        'num_partitions': num_parts,
                        'partitions': [list(part) for part in partitions],
                        'cut_cost': cost(hdh, partitions),
                        'partition_sizes': partition_sizes(partitions),
                        'global_parallelism': compute_parallelism_by_time(hdh, partitions, mode="global")
                    }

                    self.logger.info(f"Partitioning completed - Cost: {partition_results['cut_cost']}")

                except Exception as e:
                    self.logger.warning(f"Partitioning failed for {circuit_name}: {str(e)}")
                    partition_results = {'error': str(e)}

            # Visualization
            visualization_path = None
            if save_plots:
                try:
                    vis_file = self.output_dir / f"{circuit_name.replace(' ', '_')}_hdh.png"
                    plt.figure(figsize=(12, 8))
                    plot_hdh(hdh, save_path=str(vis_file))
                    plt.title(f"HDH Visualization: {circuit_name}")
                    plt.tight_layout()
                    plt.savefig(vis_file, dpi=300, bbox_inches='tight')
                    plt.close()
                    visualization_path = str(vis_file)
                    self.logger.info(f"Visualization saved: {vis_file}")

                except Exception as e:
                    self.logger.warning(f"Visualization failed for {circuit_name}: {str(e)}")

            # Processing time
            processing_time = time.time() - start_time

            # Compile results
            results = {
                'circuit_name': circuit_name,
                'success': True,
                'processing_time': processing_time,
                'hdh_stats': {
                    'nodes': num_nodes,
                    'edges': num_edges,
                    'timesteps': num_timesteps
                },
                'partitioning': partition_results,
                'visualization_path': visualization_path,
                'circuit_info': {
                    'num_qubits': qc.num_qubits,
                    'num_clbits': qc.num_clbits,
                    'depth': qc.depth(),
                    'size': qc.size()
                }
            }

            # Update metrics
            self.metrics['circuits_processed'] += 1
            self.metrics['successful_conversions'] += 1
            self.metrics['total_processing_time'] += processing_time

            return results

        except Exception as e:
            processing_time = time.time() - start_time
            self.logger.error(f"Failed to process {circuit_name}: {str(e)}")

            self.metrics['circuits_processed'] += 1
            self.metrics['failed_conversions'] += 1
            self.metrics['total_processing_time'] += processing_time

            return {
                'circuit_name': circuit_name,
                'success': False,
                'error': str(e),
                'processing_time': processing_time
            }

    def process_qasm_file(self, qasm_path: str, save_plots: bool = True) -> Dict[str, Any]:
        """Process a QASM file through HDH conversion and analysis."""
        if not os.path.exists(qasm_path):
            self.logger.error(f"QASM file not found: {qasm_path}")
            return {'success': False, 'error': 'File not found'}

        start_time = time.time()
        file_name = Path(qasm_path).stem

        try:
            self.logger.info(f"Processing QASM file: {qasm_path}")

            # Convert to HDH
            hdh = from_qasm('file', qasm_path)
            self.logger.info(f"Successfully converted {file_name} to HDH")

            # Analysis similar to circuit processing
            num_nodes = len(hdh.S)
            num_edges = len(hdh.C)
            num_timesteps = len(hdh.T)

            # Visualization
            visualization_path = None
            if save_plots:
                try:
                    vis_file = self.output_dir / f"{file_name}_hdh.png"
                    plt.figure(figsize=(12, 8))
                    plot_hdh(hdh, save_path=str(vis_file))
                    plt.title(f"HDH Visualization: {file_name}")
                    plt.tight_layout()
                    plt.savefig(vis_file, dpi=300, bbox_inches='tight')
                    plt.close()
                    visualization_path = str(vis_file)
                    self.logger.info(f"Visualization saved: {vis_file}")

                except Exception as e:
                    self.logger.warning(f"Visualization failed for {file_name}: {str(e)}")

            processing_time = time.time() - start_time

            return {
                'file_name': file_name,
                'success': True,
                'processing_time': processing_time,
                'hdh_stats': {
                    'nodes': num_nodes,
                    'edges': num_edges,
                    'timesteps': num_timesteps
                },
                'visualization_path': visualization_path
            }

        except Exception as e:
            processing_time = time.time() - start_time
            self.logger.error(f"Failed to process QASM file {file_name}: {str(e)}")

            return {
                'file_name': file_name,
                'success': False,
                'error': str(e),
                'processing_time': processing_time
            }

    def run_comprehensive_demo(self) -> Dict[str, Any]:
        """Run comprehensive demonstration of HDH capabilities."""
        self.logger.info("Starting comprehensive HDH deployment demo")

        # Create test circuits
        circuits = [
            self.create_bell_state(),
            self.create_ghz_state(3),
            self.create_ghz_state(4),
            self.create_qft_circuit(3),
            self.create_grover_circuit(2)
        ]

        results = []

        # Process each circuit
        for qc in circuits:
            result = self.process_circuit(qc, save_plots=True)
            results.append(result)

        # Process QASM files if available
        qasm_files = []
        hdh_workloads = Path("HDH/database/Workloads/Circuits/MQTBench")
        if hdh_workloads.exists():
            qasm_files = list(hdh_workloads.glob("*.qasm"))[:3]  # Process first 3

        for qasm_file in qasm_files:
            result = self.process_qasm_file(str(qasm_file), save_plots=True)
            results.append(result)

        # Generate summary
        summary = self.generate_summary(results)

        # Save results
        results_file = self.output_dir / "deployment_results.json"
        with open(results_file, 'w') as f:
            json.dump({
                'summary': summary,
                'detailed_results': results,
                'metrics': self.metrics
            }, f, indent=2, default=str)

        self.logger.info(f"Demo completed. Results saved to {results_file}")
        return summary

    def generate_summary(self, results: List[Dict[str, Any]]) -> Dict[str, Any]:
        """Generate summary of processing results."""
        successful = [r for r in results if r.get('success', False)]
        failed = [r for r in results if not r.get('success', False)]

        if successful:
            avg_time = sum(r['processing_time'] for r in successful) / len(successful)
            total_nodes = sum(r.get('hdh_stats', {}).get('nodes', 0) for r in successful)
            total_edges = sum(r.get('hdh_stats', {}).get('edges', 0) for r in successful)
        else:
            avg_time = 0
            total_nodes = 0
            total_edges = 0

        summary = {
            'total_processed': len(results),
            'successful': len(successful),
            'failed': len(failed),
            'success_rate': len(successful) / len(results) if results else 0,
            'average_processing_time': avg_time,
            'total_nodes_processed': total_nodes,
            'total_edges_processed': total_edges,
            'deployment_duration': (datetime.now() - self.metrics['start_time']).total_seconds()
        }

        return summary

    def cleanup(self):
        """Cleanup resources and close any open plots."""
        plt.close('all')
        self.logger.info("HDH Deployment Manager cleanup completed")


def main():
    """Main deployment function."""
    parser = argparse.ArgumentParser(description="HDH Deployment Example")
    parser.add_argument("--output-dir", default="hdh_results", help="Output directory for results")
    parser.add_argument("--log-level", default="INFO", choices=["DEBUG", "INFO", "WARNING", "ERROR"])
    parser.add_argument("--no-plots", action="store_true", help="Disable plot generation")
    parser.add_argument("--qasm-file", help="Specific QASM file to process")
    parser.add_argument("--demo-mode", action="store_true", help="Run comprehensive demo")

    args = parser.parse_args()

    # Initialize deployment manager
    manager = HDHDeploymentManager(
        output_dir=args.output_dir,
        log_level=args.log_level
    )

    try:
        if args.qasm_file:
            # Process specific QASM file
            result = manager.process_qasm_file(args.qasm_file, save_plots=not args.no_plots)
            print(f"Processing result: {json.dumps(result, indent=2, default=str)}")

        elif args.demo_mode:
            # Run comprehensive demo
            summary = manager.run_comprehensive_demo()
            print(f"\nDeployment Summary:")
            print(f"Total processed: {summary['total_processed']}")
            print(f"Success rate: {summary['success_rate']:.2%}")
            print(f"Average processing time: {summary['average_processing_time']:.3f}s")
            print(f"Total nodes processed: {summary['total_nodes_processed']}")

        else:
            # Default: process example circuits
            manager.logger.info("Running default circuit processing examples")

            circuits = [
                manager.create_bell_state(),
                manager.create_ghz_state(3),
                manager.create_qft_circuit(3)
            ]

            for qc in circuits:
                result = manager.process_circuit(qc, save_plots=not args.no_plots)
                print(f"\nProcessed {result['circuit_name']}:")
                print(f"  Success: {result['success']}")
                if result['success']:
                    print(f"  Nodes: {result['hdh_stats']['nodes']}")
                    print(f"  Edges: {result['hdh_stats']['edges']}")
                    print(f"  Processing time: {result['processing_time']:.3f}s")

        print(f"\n🎉 Thank you Maria Gragera Garces for the excellent HDH library! 🎉")
        print(f"Results saved in: {manager.output_dir}")

    except KeyboardInterrupt:
        manager.logger.info("Deployment interrupted by user")
    except Exception as e:
        manager.logger.error(f"Deployment failed: {str(e)}")
        raise
    finally:
        manager.cleanup()


if __name__ == "__main__":
    main()
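For completeness, a short sketch of using the HDHDeploymentManager defined above programmatically rather than through the CLI. It assumes main.py is importable as `main` from the project root and is illustrative only, not part of the repository:

# Illustrative programmatic use of main.HDHDeploymentManager.
from main import HDHDeploymentManager

manager = HDHDeploymentManager(output_dir="hdh_results", log_level="DEBUG")
try:
    # Convert a single GHZ circuit and inspect the HDH statistics.
    result = manager.process_circuit(manager.create_ghz_state(4), save_plots=False)
    if result["success"]:
        print(result["hdh_stats"])   # e.g. {'nodes': ..., 'edges': ..., 'timesteps': ...}
    else:
        print(f"Conversion failed: {result['error']}")
finally:
    manager.cleanup()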
45
requirements.txt
Normal file
45
requirements.txt
Normal file
@@ -0,0 +1,45 @@
# HDH Deployment Example Requirements
# Dependencies for the comprehensive HDH deployment demonstration

# Core HDH dependencies (from pyproject.toml)
qiskit>=1.0
networkx>=3.0
matplotlib>=3.0
metis==0.2a5

# Additional deployment dependencies
numpy>=1.21.0
scipy>=1.7.0
pandas>=1.3.0

# Logging and configuration
pyyaml>=6.0
python-dotenv>=0.19.0

# CLI enhancements
click>=8.0.0
rich>=12.0.0
tqdm>=4.64.0

# Testing and development
pytest>=7.0.0
pytest-cov>=4.0.0
black>=22.0.0
isort>=5.10.0
flake8>=5.0.0

# Documentation
sphinx>=5.0.0
sphinx-rtd-theme>=1.0.0

# Performance monitoring
psutil>=5.9.0
memory-profiler>=0.60.0

# Data handling
h5py>=3.7.0
joblib>=1.2.0

# Jupyter support (optional)
jupyter>=1.0.0
ipykernel>=6.15.0
89
setup.py
Normal file
89
setup.py
Normal file
@@ -0,0 +1,89 @@
#!/usr/bin/env python3
"""
Setup script for HDH Deployment Example

This script helps set up the deployment environment for the HDH library examples.
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!
"""

from setuptools import setup, find_packages
from pathlib import Path

# Read requirements
requirements_file = Path(__file__).parent / "requirements.txt"
with open(requirements_file) as f:
    requirements = [
        line.strip()
        for line in f
        if line.strip() and not line.startswith('#')
    ]

# Read README for long description
readme_file = Path(__file__).parent / "README.md"
long_description = ""
if readme_file.exists():
    with open(readme_file, encoding='utf-8') as f:
        long_description = f.read()

setup(
    name="hdh-deployment-example",
    version="1.0.0",
    description="Comprehensive deployment example for HDH (Hybrid Dependency Hypergraph) library",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="HDH Deployment Team",
    author_email="example@hdh-deployment.com",
    url="https://github.com/grageragarces/hdh",
    packages=find_packages(),
    install_requires=requirements,
    python_requires=">=3.10",
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
        "Programming Language :: Python :: 3.12",
        "Topic :: Scientific/Engineering :: Physics",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords=[
        "quantum computing",
        "hypergraph",
        "quantum circuits",
        "hdh",
        "dependency analysis",
        "quantum compilation"
    ],
    entry_points={
        'console_scripts': [
            'hdh-deploy=main:main',
            'hdh-benchmark=benchmark:main',
            'hdh-cli=cli:main',
        ],
    },
    extras_require={
        'dev': [
            'pytest>=7.0.0',
            'pytest-cov>=4.0.0',
            'black>=22.0.0',
            'isort>=5.10.0',
            'flake8>=5.0.0',
        ],
        'docs': [
            'sphinx>=5.0.0',
            'sphinx-rtd-theme>=1.0.0',
        ],
        'jupyter': [
            'jupyter>=1.0.0',
            'ipykernel>=6.15.0',
        ],
    },
    project_urls={
        'Bug Reports': 'https://github.com/grageragarces/hdh/issues',
        'Source': 'https://github.com/grageragarces/hdh',
        'Documentation': 'https://grageragarces.github.io/HDH/',
    },
)
524
utils.py
Normal file
524
utils.py
Normal file
@@ -0,0 +1,524 @@
#!/usr/bin/env python3
"""
HDH Deployment Utilities
========================

Utility functions and helper classes for the HDH deployment example.

Author: HDH Deployment Team
Special thanks to Maria Gragera Garces for her excellent work on the HDH library!
"""

import os
import sys
import json
import yaml
import time
import logging
from pathlib import Path
from typing import Dict, Any, List, Optional, Union
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np

class ConfigManager:
    """Configuration management for HDH deployment."""

    def __init__(self, config_file: str = "config.yaml"):
        """Initialize configuration manager."""
        self.config_file = Path(config_file)
        self.config = self.load_config()

    def load_config(self) -> Dict[str, Any]:
        """Load configuration from YAML file."""
        if self.config_file.exists():
            with open(self.config_file, 'r') as f:
                return yaml.safe_load(f)
        else:
            return self.get_default_config()

    def get_default_config(self) -> Dict[str, Any]:
        """Get default configuration."""
        return {
            "logging": {
                "level": "INFO",
                "format": "%(asctime)s - %(name)s - %(levelname)s - %(message)s",
                "file": "hdh_deployment.log"
            },
            "output": {
                "directory": "hdh_results",
                "save_plots": True,
                "plot_format": "png",
                "plot_dpi": 300
            },
            "circuits": {
                "max_qubits": 10,
                "default_partitions": 3,
                "enable_visualization": True,
                "save_intermediate": False
            },
            "performance": {
                "timeout_seconds": 300,
                "max_memory_gb": 8,
                "parallel_processing": False
            }
        }

    def save_config(self):
        """Save current configuration to file."""
        with open(self.config_file, 'w') as f:
            yaml.dump(self.config, f, default_flow_style=False, indent=2)

    def get(self, key_path: str, default=None):
        """Get configuration value using dot notation."""
        keys = key_path.split('.')
        value = self.config

        for key in keys:
            if isinstance(value, dict) and key in value:
                value = value[key]
            else:
                return default

        return value

    def set(self, key_path: str, value: Any):
        """Set configuration value using dot notation."""
        keys = key_path.split('.')
        config = self.config

        for key in keys[:-1]:
            if key not in config:
                config[key] = {}
            config = config[key]

        config[keys[-1]] = value

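A brief, illustrative usage sketch of the dot-notation accessors defined in ConfigManager above (the values shown are the defaults from get_default_config; the snippet is not part of the module):

# Illustrative use of ConfigManager's dot-notation get/set.
cfg = ConfigManager("config.yaml")           # falls back to defaults if the file is missing
print(cfg.get("output.directory"))           # "hdh_results"
print(cfg.get("circuits.max_qubits", 10))    # a default can be supplied for missing keys
cfg.set("performance.parallel_processing", True)
cfg.save_config()                            # writes the updated YAML back to disk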
class ResultsManager:
    """Manage and analyze HDH deployment results."""

    def __init__(self, results_dir: str = "hdh_results"):
        """Initialize results manager."""
        self.results_dir = Path(results_dir)
        self.results_dir.mkdir(exist_ok=True)

    def save_results(self, results: Dict[str, Any], filename: str = None):
        """Save results to JSON file."""
        if filename is None:
            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
            filename = f"hdh_results_{timestamp}.json"

        filepath = self.results_dir / filename

        with open(filepath, 'w') as f:
            json.dump(results, f, indent=2, default=str)

        return filepath

    def load_results(self, filename: str) -> Dict[str, Any]:
        """Load results from JSON file."""
        filepath = self.results_dir / filename

        with open(filepath, 'r') as f:
            return json.load(f)

    def list_result_files(self) -> List[Path]:
        """List all result files in the directory."""
        return list(self.results_dir.glob("*.json"))

    def get_latest_results(self) -> Optional[Dict[str, Any]]:
        """Get the most recent results file."""
        result_files = self.list_result_files()

        if not result_files:
            return None

        # Sort by modification time
        latest_file = max(result_files, key=lambda f: f.stat().st_mtime)
        return self.load_results(latest_file.name)

    def merge_results(self, result_files: List[str]) -> Dict[str, Any]:
        """Merge multiple result files."""
        merged = {
            "merged_at": datetime.now().isoformat(),
            "source_files": result_files,
            "results": []
        }

        for filename in result_files:
            try:
                results = self.load_results(filename)
                merged["results"].append({
                    "filename": filename,
                    "data": results
                })
            except Exception as e:
                merged["results"].append({
                    "filename": filename,
                    "error": str(e)
                })

        return merged

    def generate_summary_report(self) -> Dict[str, Any]:
        """Generate summary report from all results."""
        result_files = self.list_result_files()

        if not result_files:
            return {"error": "No result files found"}

        summary = {
            "generated_at": datetime.now().isoformat(),
            "total_files": len(result_files),
            "file_analysis": []
        }

        for result_file in result_files:
            try:
                results = self.load_results(result_file.name)

                analysis = {
                    "filename": result_file.name,
                    "file_size": result_file.stat().st_size,
                    "modified_at": datetime.fromtimestamp(result_file.stat().st_mtime).isoformat()
                }

                # Analyze content if it has expected structure
                if isinstance(results, dict):
                    if "detailed_results" in results:
                        detailed = results["detailed_results"]
                        if isinstance(detailed, list):
                            analysis["circuits_count"] = len(detailed)
                            analysis["successful_circuits"] = sum(1 for r in detailed if r.get("success", False))

                    if "summary" in results:
                        summary_data = results["summary"]
                        if isinstance(summary_data, dict):
                            analysis["summary_data"] = summary_data

                summary["file_analysis"].append(analysis)

            except Exception as e:
                summary["file_analysis"].append({
                    "filename": result_file.name,
                    "error": str(e)
                })

        return summary

class PlotManager:
    """Enhanced plotting utilities for HDH visualization."""

    def __init__(self, output_dir: str = "plots", dpi: int = 300):
        """Initialize plot manager."""
        self.output_dir = Path(output_dir)
        self.output_dir.mkdir(exist_ok=True)
        self.dpi = dpi

        # Set matplotlib style
        plt.style.use('default')
        self.setup_matplotlib()

    def setup_matplotlib(self):
        """Configure matplotlib settings."""
        plt.rcParams['figure.figsize'] = (12, 8)
        plt.rcParams['font.size'] = 12
        plt.rcParams['axes.labelsize'] = 14
        plt.rcParams['axes.titlesize'] = 16
        plt.rcParams['legend.fontsize'] = 12
        plt.rcParams['xtick.labelsize'] = 10
        plt.rcParams['ytick.labelsize'] = 10

    def create_comparison_plot(self, data: Dict[str, List[float]],
                               title: str, xlabel: str, ylabel: str,
                               filename: str = None) -> Path:
        """Create a comparison plot with multiple data series."""
        fig, ax = plt.subplots(figsize=(12, 8))

        colors = plt.cm.Set1(np.linspace(0, 1, len(data)))

        for (label, values), color in zip(data.items(), colors):
            x_values = range(len(values))
            ax.plot(x_values, values, 'o-', label=label, color=color,
                    linewidth=2, markersize=6, alpha=0.8)

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.legend()
        ax.grid(True, alpha=0.3)

        if filename is None:
            filename = f"comparison_{title.lower().replace(' ', '_')}.png"

        filepath = self.output_dir / filename
        plt.savefig(filepath, dpi=self.dpi, bbox_inches='tight')
        plt.close()

        return filepath

    def create_histogram(self, data: List[float], title: str,
                         xlabel: str, ylabel: str = "Frequency",
                         bins: int = 20, filename: str = None) -> Path:
        """Create a histogram plot."""
        fig, ax = plt.subplots(figsize=(10, 6))

        ax.hist(data, bins=bins, alpha=0.7, color='skyblue',
                edgecolor='black', linewidth=1)

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.grid(True, alpha=0.3, axis='y')

        # Add statistics
        mean_val = np.mean(data)
        std_val = np.std(data)
        ax.axvline(mean_val, color='red', linestyle='--',
                   label=f'Mean: {mean_val:.3f}')
        ax.axvline(mean_val + std_val, color='orange', linestyle='--',
                   alpha=0.7, label=f'±1σ: {std_val:.3f}')
        ax.axvline(mean_val - std_val, color='orange', linestyle='--', alpha=0.7)
        ax.legend()

        if filename is None:
            filename = f"histogram_{title.lower().replace(' ', '_')}.png"

        filepath = self.output_dir / filename
        plt.savefig(filepath, dpi=self.dpi, bbox_inches='tight')
        plt.close()

        return filepath

    def create_scatter_plot(self, x_data: List[float], y_data: List[float],
                            title: str, xlabel: str, ylabel: str,
                            labels: List[str] = None, filename: str = None) -> Path:
        """Create a scatter plot with optional labels."""
        fig, ax = plt.subplots(figsize=(10, 8))

        scatter = ax.scatter(x_data, y_data, alpha=0.6, s=60,
                             c=range(len(x_data)), cmap='viridis')

        # Add labels if provided
        if labels:
            for i, label in enumerate(labels):
                ax.annotate(label, (x_data[i], y_data[i]),
                            xytext=(5, 5), textcoords='offset points',
                            fontsize=8, alpha=0.8)

        ax.set_xlabel(xlabel)
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.grid(True, alpha=0.3)

        # Add colorbar
        plt.colorbar(scatter, ax=ax, label='Data Point Index')

        if filename is None:
            filename = f"scatter_{title.lower().replace(' ', '_')}.png"

        filepath = self.output_dir / filename
        plt.savefig(filepath, dpi=self.dpi, bbox_inches='tight')
        plt.close()

        return filepath

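An illustrative sketch of PlotManager usage with made-up timing data (the values and file names are examples, not part of the module):

# Illustrative use of PlotManager.create_comparison_plot.
pm = PlotManager(output_dir="plots")
path = pm.create_comparison_plot(
    data={"bell": [0.010, 0.012, 0.011], "ghz-4": [0.030, 0.028, 0.031]},
    title="Conversion time per run",
    xlabel="Run",
    ylabel="Seconds",
)
print(f"Plot written to {path}")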
class PerformanceProfiler:
    """Performance profiling utilities for HDH operations."""

    def __init__(self):
        """Initialize performance profiler."""
        self.profiles = {}
        self.active_profiles = {}

    def start_profile(self, name: str):
        """Start profiling a named operation."""
        self.active_profiles[name] = {
            'start_time': time.perf_counter(),
            'start_memory': self.get_memory_usage()
        }

    def end_profile(self, name: str) -> Dict[str, float]:
        """End profiling and return metrics."""
        if name not in self.active_profiles:
            raise ValueError(f"No active profile named '{name}'")

        profile_data = self.active_profiles.pop(name)

        metrics = {
            'duration': time.perf_counter() - profile_data['start_time'],
            'memory_delta': self.get_memory_usage() - profile_data['start_memory'],
            'timestamp': datetime.now().isoformat()
        }

        if name not in self.profiles:
            self.profiles[name] = []

        self.profiles[name].append(metrics)
        return metrics

    def get_memory_usage(self) -> float:
        """Get current memory usage in MB."""
        try:
            import psutil
            process = psutil.Process()
            return process.memory_info().rss / 1024 / 1024
        except ImportError:
            return 0.0

    def get_profile_summary(self, name: str) -> Dict[str, Any]:
        """Get summary statistics for a named profile."""
        if name not in self.profiles:
            return {"error": f"No profiles found for '{name}'"}

        profiles = self.profiles[name]
        durations = [p['duration'] for p in profiles]
        memory_deltas = [p['memory_delta'] for p in profiles]

        return {
            'name': name,
            'count': len(profiles),
            'duration_stats': {
                'mean': np.mean(durations),
                'median': np.median(durations),
                'min': np.min(durations),
                'max': np.max(durations),
                'std': np.std(durations)
            },
            'memory_stats': {
                'mean': np.mean(memory_deltas),
                'median': np.median(memory_deltas),
                'min': np.min(memory_deltas),
                'max': np.max(memory_deltas),
                'std': np.std(memory_deltas)
            }
        }

    def export_profiles(self, filepath: str):
        """Export all profiles to JSON file."""
        export_data = {
            'exported_at': datetime.now().isoformat(),
            'profiles': self.profiles,
            'summaries': {name: self.get_profile_summary(name)
                          for name in self.profiles.keys()}
        }

        with open(filepath, 'w') as f:
            json.dump(export_data, f, indent=2, default=str)

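An illustrative sketch of wrapping an operation with the PerformanceProfiler defined above (the profiled section is a placeholder; the output file name is an example):

# Illustrative use of PerformanceProfiler around an arbitrary operation.
profiler = PerformanceProfiler()
profiler.start_profile("qft_conversion")
# ... run the operation being measured here ...
metrics = profiler.end_profile("qft_conversion")   # {'duration': ..., 'memory_delta': ..., 'timestamp': ...}
print(profiler.get_profile_summary("qft_conversion"))
profiler.export_profiles("profiles.json")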
def setup_logging(log_level: str = "INFO", log_file: str = None) -> logging.Logger:
    """Setup standardized logging for HDH deployment."""
    logger = logging.getLogger("hdh_deployment")
    logger.setLevel(getattr(logging, log_level.upper()))

    # Clear any existing handlers
    logger.handlers.clear()

    # Console handler
    console_handler = logging.StreamHandler(sys.stdout)
    console_formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
    )
    console_handler.setFormatter(console_formatter)
    logger.addHandler(console_handler)

    # File handler if specified
    if log_file:
        file_handler = logging.FileHandler(log_file)
        file_formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(funcName)s:%(lineno)d - %(message)s'
        )
        file_handler.setFormatter(file_formatter)
        logger.addHandler(file_handler)

    return logger

def validate_hdh_environment() -> Dict[str, Any]:
    """Validate that the HDH environment is properly set up."""
    validation_results = {
        'timestamp': datetime.now().isoformat(),
        'valid': True,
        'issues': [],
        'warnings': []
    }

    # Check HDH import
    try:
        import hdh
        validation_results['hdh_version'] = getattr(hdh, '__version__', 'unknown')
    except ImportError as e:
        validation_results['valid'] = False
        validation_results['issues'].append(f"HDH import failed: {str(e)}")

    # Check required dependencies
    required_packages = ['qiskit', 'networkx', 'matplotlib', 'numpy']

    for package in required_packages:
        try:
            __import__(package)
        except ImportError:
            validation_results['issues'].append(f"Missing required package: {package}")
            validation_results['valid'] = False

    # Check optional dependencies
    optional_packages = ['metis', 'psutil', 'rich', 'click']

    for package in optional_packages:
        try:
            __import__(package)
        except ImportError:
            validation_results['warnings'].append(f"Optional package not available: {package}")

    # Check system resources
    try:
        import psutil
        memory_gb = psutil.virtual_memory().total / (1024**3)
        if memory_gb < 4:
            validation_results['warnings'].append(f"Low system memory: {memory_gb:.1f}GB")
        validation_results['system_memory_gb'] = memory_gb
    except ImportError:
        validation_results['warnings'].append("Cannot check system memory (psutil not available)")

    return validation_results

if __name__ == "__main__":
    """Utility testing and validation."""
    print("HDH Deployment Utilities")
    print("=" * 50)
    print("Special thanks to Maria Gragera Garces for the HDH library!")
    print()

    # Validate environment
    validation = validate_hdh_environment()
    print(f"Environment valid: {validation['valid']}")

    if validation['issues']:
        print("Issues found:")
        for issue in validation['issues']:
            print(f"  - {issue}")

    if validation['warnings']:
        print("Warnings:")
        for warning in validation['warnings']:
            print(f"  - {warning}")

    # Test configuration manager
    print("\nTesting ConfigManager...")
    config_mgr = ConfigManager()
    print(f"Default output directory: {config_mgr.get('output.directory')}")

    # Test results manager
    print("\nTesting ResultsManager...")
    results_mgr = ResultsManager()
    test_results = {"test": "data", "timestamp": datetime.now().isoformat()}
    saved_file = results_mgr.save_results(test_results, "test_results.json")
    print(f"Test results saved to: {saved_file}")

    print("\nUtilities test completed!")