mirror of https://github.com/Xahau/xahaud.git (synced 2026-01-19 14:15:16 +00:00)

Compare commits: l10k ... ci-experim (32 commits)

ce6c56b104, e342b17172, 6a4aff7f36, 9dca972266, d8a8030556, 788684ca74,
a2d9947e85, 303a476a53, 80fbf9e2d0, 04f9d4fbd1, 2a2fee3cd3, 573569f031,
1ebd067c9f, 7450a302dc, 949252e25f, 544bb4f32e, 7328a26710, 317a333170,
33052c2bde, df2fc9606a, 8c79f4bfcb, a3012388e3, a170b387fc, 841d902dbd,
f8acb88f94, 3f192ee1b5, ed1ba5595d, 1d7f5d42cc, 76a64d0eaa, 2502509e9e,
8a99f8ffc3, 75849b5314
.ci/gitea.py (new file, 885 lines)
@@ -0,0 +1,885 @@
#!/usr/bin/env python3
"""
Persistent Gitea for Conan on Self-Hosted GA Runner
- Localhost only (127.0.0.1) for security
- Persistent volumes survive between workflows
- Idempotent - safe to run multiple times
- Reuses existing container if already running
- Uses pre-baked app.ini to bypass web setup wizard

What This Script Uses Conan For
--------------------------------
This script uses Conan only for testing and verification:
- Optionally configures host's conan client (if available)
- Runs container-based tests to verify the repository works
- Tests upload/download of a sample package (zlib) in a container
- Verifies authentication and package management work correctly
- Does NOT build or manage your actual project dependencies

The test command runs in a Docker container on the same network as Gitea,
exactly mimicking how your GitHub Actions workflows will use it.

Docker Networking
-----------------
Gitea is configured with ROOT_URL using the container name for consistency.
A Docker network (default: conan-net) is used for container-to-container communication.

Access methods:

1. From the host machine:
   - The host uses http://localhost:3000 (port mapping)
   - Host's Conan configuration uses localhost

2. From Docker containers (tests and CI/CD):
   - Containers use http://gitea-conan-persistent:3000
   - Containers must be on the same network (default: conan-net)
   - The test command automatically handles network setup

The script automatically:
- Creates the Docker network if needed
- Connects Gitea to the network
- Runs tests in containers on the same network

Example in GitHub Actions workflow:
    docker network create conan-net
    docker network connect conan-net gitea-conan-persistent
    docker run --network conan-net <your-build-container> bash -c "
        conan remote add gitea-local http://gitea-conan-persistent:3000/api/packages/conan/conan
        conan user -p conan-pass-2024 -r gitea-local conan
        conan config set general.revisions_enabled=1  # Required for Conan v1
    "
"""

import argparse
import logging
import os
import queue
import shutil
import socket
import subprocess
import sys
import threading
import time
from typing import Optional


class DockerLogStreamer(threading.Thread):
    """Background thread to stream docker logs -f and pass lines into a queue"""

    def __init__(self, container_name: str, log_queue: queue.Queue):
        super().__init__(name=f"DockerLogStreamer-{container_name}")
        self.container = container_name
        self.log_queue = log_queue
        self._stop_event = threading.Event()
        self.proc: Optional[subprocess.Popen] = None
        self.daemon = True  # so it won't block interpreter exit if something goes wrong

    def run(self):
        try:
            # Follow logs, capture both stdout and stderr
            self.proc = subprocess.Popen(
                ["docker", "logs", "-f", self.container],
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                text=True,
                bufsize=1,
                universal_newlines=True,
            )
            if not self.proc.stdout:
                return
            for line in self.proc.stdout:
                if line is None:
                    break
                # Ensure exact line fidelity
                self.log_queue.put(line.rstrip("\n"))
                if self._stop_event.is_set():
                    break
        except Exception as e:
            # Put an error marker so consumer can see
            self.log_queue.put(f"[STREAMER_ERROR] {e}")
        finally:
            try:
                if self.proc and self.proc.poll() is None:
                    # Do not kill abruptly unless asked to stop
                    pass
            except Exception:
                pass

    def stop(self, timeout: float = 5.0):
        self._stop_event.set()
        try:
            if self.proc and self.proc.poll() is None:
                # Politely terminate docker logs
                self.proc.terminate()
                try:
                    self.proc.wait(timeout=timeout)
                except Exception:
                    self.proc.kill()
        except Exception:
            pass


class PersistentGiteaConan:
    def __init__(self, debug: bool = False, verbose: bool = False):
        # Configurable via environment variables for CI flexibility
        self.container = os.getenv("GITEA_CONTAINER_NAME", "gitea-conan-persistent")
        self.port = int(os.getenv("GITEA_PORT", "3000"))
        self.user = os.getenv("GITEA_USER", "conan")
        self.passwd = os.getenv("GITEA_PASSWORD", "conan-pass-2024")  # do not print this in logs
        self.email = os.getenv("GITEA_EMAIL", "conan@localhost")
        # Persistent data location on the runner
        self.data_dir = os.getenv("GITEA_DATA_DIR", "/opt/gitea")
        # Docker network for container communication
        self.network = os.getenv("GITEA_NETWORK", "conan-net")
        # Behavior flags
        self.print_credentials = os.getenv("GITEA_PRINT_CREDENTIALS", "0") == "1"
        self.startup_timeout = int(os.getenv("GITEA_STARTUP_TIMEOUT", "120"))

        # Logging and docker log streaming infrastructure
        self._setup_logging(debug=debug, verbose=verbose)
        self.log_queue: queue.Queue[str] = queue.Queue()
        self.log_streamer: Optional[DockerLogStreamer] = None
        # Conan execution context cache
        self._conan_prefix: Optional[str] = None  # '' for direct, full sudo+shell for delegated; None if unavailable

        # Track sensitive values that should be masked in logs
        self._sensitive_values: set = {self.passwd}  # Start with password

    def _setup_logging(self, debug: bool, verbose: bool):
        # Determine level: debug > verbose > default WARNING
        if debug:
            level = logging.DEBUG
        elif verbose:
            level = logging.INFO
        else:
            level = logging.WARNING
        logging.basicConfig(level=level, format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s')
        self.logger = logging.getLogger(__name__)
        # Be slightly quieter for noisy libs
        logging.getLogger('urllib3').setLevel(logging.WARNING)

    def _mask_sensitive(self, text: str) -> str:
        """Mask any sensitive values in text for safe logging"""
        if not text:
            return text
        masked = text
        for sensitive in self._sensitive_values:
            if sensitive and sensitive in masked:
                masked = masked.replace(sensitive, "***REDACTED***")
        return masked

    def run(self, cmd, check=True, env=None, sensitive=False):
        """Run command with minimal output

        Args:
            cmd: Command to run
            check: Raise exception on non-zero exit
            env: Environment variables
            sensitive: If True, command and output are completely hidden
        """
        run_env = os.environ.copy()
        if env:
            run_env.update(env)

        # Log command (masked or hidden based on sensitivity)
        if sensitive:
            self.logger.debug("EXEC: [sensitive command hidden]")
        else:
            self.logger.debug(f"EXEC: {self._mask_sensitive(cmd)}")

        result = subprocess.run(cmd, shell=True, capture_output=True, text=True, env=run_env)

        # Log output (masked or hidden based on sensitivity)
        if not sensitive:
            if result.stdout:
                self.logger.debug(f"STDOUT: {self._mask_sensitive(result.stdout.strip())}"[:1000])
            if result.stderr:
                self.logger.debug(f"STDERR: {self._mask_sensitive(result.stderr.strip())}"[:1000])

        if result.returncode != 0 and check:
            if sensitive:
                self.logger.error(f"Command failed ({result.returncode})")
                raise RuntimeError("Command failed (details hidden for security)")
            else:
                self.logger.error(f"Command failed ({result.returncode}) for: {self._mask_sensitive(cmd)}")
                raise RuntimeError(f"Command failed: {self._mask_sensitive(result.stderr)}")
        return result

    def is_running(self):
        """Check if container is already running"""
        result = self.run(f"docker ps -q -f name={self.container}", check=False)
        return bool(result.stdout.strip())

    def container_exists(self):
        """Check if container exists (running or stopped)"""
        result = self.run(f"docker ps -aq -f name={self.container}", check=False)
        return bool(result.stdout.strip())

    # ---------- Helpers & Preflight Checks ----------

    def _check_docker(self):
        if not shutil.which("docker"):
            raise RuntimeError(
                "Docker is not installed or not in PATH. Please install Docker and ensure the daemon is running.")
        # Check daemon access
        info = subprocess.run("docker info", shell=True, capture_output=True, text=True)
        if info.returncode != 0:
            raise RuntimeError(
                "Docker daemon not accessible. Ensure the Docker service is running and the current user has permission to use Docker.")

    def _is_port_in_use(self, host, port):
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(0.5)
            return s.connect_ex((host, port)) == 0

    def _setup_directories(self):
        """Create directory structure with proper ownership"""
        gitea_data = os.path.join(self.data_dir, "gitea")
        gitea_conf = os.path.join(gitea_data, "gitea", "conf")

        # Create all directories
        os.makedirs(gitea_conf, exist_ok=True)
        self.logger.info(f"📁 Created directory structure: {self.data_dir}")

        # Set ownership recursively to git user (UID 1000)
        for root, dirs, files in os.walk(self.data_dir):
            os.chown(root, 1000, 1000)
            for d in dirs:
                os.chown(os.path.join(root, d), 1000, 1000)
            for f in files:
                os.chown(os.path.join(root, f), 1000, 1000)

    def _preflight(self):
        self.logger.info("🔍 Running preflight checks...")
        self._check_docker()
        # Port check only if our container is not already running
        if not self.is_running():
            if self._is_port_in_use("127.0.0.1", self.port):
                raise RuntimeError(f"Port {self.port} on 127.0.0.1 is already in use. Cannot bind Gitea.")
        self.logger.info("✓ Preflight checks passed")

    def _generate_secret(self, secret_type):
        """Generate a secret using Gitea's built-in generator"""
        cmd = f"docker run --rm gitea/gitea:latest gitea generate secret {secret_type}"
        result = self.run(cmd, sensitive=True)  # Don't log the output
        secret = result.stdout.strip()
        if secret:
            self._sensitive_values.add(secret)  # Track this secret for masking
        self.logger.debug(f"Generated {secret_type} successfully")
        return secret

    def _create_app_ini(self, gitea_conf_dir):
        """Create a pre-configured app.ini file"""
        app_ini_path = os.path.join(gitea_conf_dir, "app.ini")

        # Check if app.ini already exists (from previous run)
        if os.path.exists(app_ini_path):
            self.logger.info("✓ Using existing app.ini configuration")
            # Minimal migration: ensure HTTP_ADDR allows inbound connections from host via Docker mapping
            try:
                with open(app_ini_path, 'r+', encoding='utf-8') as f:
                    content = f.read()
                    updated = False
                    if "HTTP_ADDR = 127.0.0.1" in content:
                        content = content.replace("HTTP_ADDR = 127.0.0.1", "HTTP_ADDR = 0.0.0.0")
                        updated = True
                    if updated:
                        f.seek(0)
                        f.write(content)
                        f.truncate()
                        self.logger.info("🔁 Updated existing app.ini to bind on 0.0.0.0 for container reachability")
            except Exception as e:
                self.logger.warning(f"⚠️ Could not update existing app.ini automatically: {e}")
            return

        self.logger.info("🔑 Generating security secrets...")
        secret_key = self._generate_secret("SECRET_KEY")
        internal_token = self._generate_secret("INTERNAL_TOKEN")

        self.logger.info("📝 Creating app.ini configuration...")
        app_ini_content = f"""APP_NAME = Conan Package Registry
RUN_USER = git
RUN_MODE = prod

[server]
ROOT_URL = http://gitea-conan-persistent:{self.port}/
HTTP_ADDR = 0.0.0.0
HTTP_PORT = 3000
DISABLE_SSH = true
START_SSH_SERVER = false
OFFLINE_MODE = true
DOMAIN = gitea-conan-persistent
LFS_START_SERVER = false

[database]
DB_TYPE = sqlite3
PATH = /data/gitea.db
LOG_SQL = false

[repository]
ROOT = /data/gitea-repositories
DISABLED_REPO_UNITS = repo.issues, repo.pulls, repo.wiki, repo.projects, repo.actions

[security]
INSTALL_LOCK = true
SECRET_KEY = {secret_key}
INTERNAL_TOKEN = {internal_token}
PASSWORD_HASH_ALGO = pbkdf2
MIN_PASSWORD_LENGTH = 8

[service]
DISABLE_REGISTRATION = true
ENABLE_NOTIFY_MAIL = false
REGISTER_EMAIL_CONFIRM = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = true
DEFAULT_ALLOW_CREATE_ORGANIZATION = false
DEFAULT_ENABLE_TIMETRACKING = false

[mailer]
ENABLED = false

[session]
PROVIDER = file

[log]
MODE = console
LEVEL = Info

[api]
ENABLE_SWAGGER = false

[packages]
ENABLED = true

[other]
SHOW_FOOTER_VERSION = false
"""

        # Write app.ini with restrictive permissions
        with open(app_ini_path, 'w') as f:
            f.write(app_ini_content)

        # Set ownership to UID 1000:1000 (git user in container)
        os.chown(app_ini_path, 1000, 1000)
        os.chmod(app_ini_path, 0o640)

        self.logger.info("✓ Created app.ini with pre-generated secrets")

    def setup(self):
        """Setup or verify Gitea is running"""
        self.logger.info("🔧 Setting up persistent Gitea for Conan...")
        # Preflight
        self._preflight()

        # Create persistent data directory structure with proper ownership
        self._setup_directories()
        gitea_data = os.path.join(self.data_dir, "gitea")
        gitea_conf = os.path.join(gitea_data, "gitea", "conf")

        # Create app.ini BEFORE starting container (for headless setup)
        self._create_app_ini(gitea_conf)

        # Check if already running
        if self.is_running():
            self.logger.info("✅ Gitea container already running")
            self._verify_health()
            self._configure_conan()
            return

        # Check if container exists but stopped
        if self.container_exists():
            self.logger.info("🔄 Starting existing container...")
            self.run(f"docker start {self.container}")
            # Start log streaming for visibility
            self._start_log_streaming()
            try:
                time.sleep(2)
                self._verify_health()
                self._configure_conan()
            finally:
                self._stop_log_streaming()
            return

        # Create new container (first time setup)
        self.logger.info("🚀 Creating new Gitea container...")

        gitea_data = os.path.join(self.data_dir, "gitea")

        # IMPORTANT: Bind to 127.0.0.1 only for security
        # With pre-configured app.ini, Gitea starts directly without wizard
        docker_cmd = f"""docker run -d \
            --name {self.container} \
            -p 127.0.0.1:{self.port}:3000 \
            -v {gitea_data}:/data \
            -v /etc/timezone:/etc/timezone:ro \
            -v /etc/localtime:/etc/localtime:ro \
            -e USER_UID=1000 \
            -e USER_GID=1000 \
            --restart unless-stopped \
            gitea/gitea:latest"""

        self.run(docker_cmd)

        # Debug: Check actual port mapping
        port_check = self.run(f"docker port {self.container}", check=False)
        self.logger.info(f"🔍 Container port mapping: {port_check.stdout.strip()}")

        # Start log streaming and wait for Gitea to be ready
        self._start_log_streaming()
        try:
            self._wait_for_startup(self.startup_timeout)
            # Create user (idempotent)
            self._create_user()
            # Configure Conan
            self._configure_conan()
        finally:
            self._stop_log_streaming()

        self.logger.info("✅ Persistent Gitea ready for Conan packages!")
        self.logger.info(f"   URL: http://localhost:{self.port}")
        if self.print_credentials:
            self.logger.info(f"   User: {self.user} / {self.passwd}")
        else:
            self.logger.info("   Credentials: hidden (set GITEA_PRINT_CREDENTIALS=1 to display)")
        self.logger.info(f"   Data persisted in: {self.data_dir}")

    def _start_log_streaming(self):
        # Start background docker log streamer if not running
        if self.log_streamer is not None:
            self._stop_log_streaming()
        self.logger.debug("Starting Docker log streamer...")
        self.log_streamer = DockerLogStreamer(self.container, self.log_queue)
        self.log_streamer.start()

    def _stop_log_streaming(self):
        if self.log_streamer is not None:
            self.logger.debug("Stopping Docker log streamer...")
            try:
                self.log_streamer.stop()
                self.log_streamer.join(timeout=5)
            except Exception:
                pass
            finally:
                self.log_streamer = None

    def _wait_for_startup(self, timeout=60):
        """Wait for container to become healthy by consuming the docker log stream"""
        self.logger.info(f"⏳ Waiting for Gitea to start (timeout: {timeout}s)...")
        start_time = time.time()
        server_detected = False

        while time.time() - start_time < timeout:
            # Drain all available log lines without blocking
            drained_any = False
            while True:
                try:
                    line = self.log_queue.get_nowait()
                    drained_any = True
                except queue.Empty:
                    break
                if not line.strip():
                    continue
                # Always log raw docker lines at DEBUG level
                self.logger.debug(f"DOCKER: {line}")
                # Promote important events
                l = line
                if ("[E]" in l) or ("ERROR" in l) or ("FATAL" in l) or ("panic" in l):
                    self.logger.error(l)
                elif ("WARN" in l) or ("[W]" in l):
                    self.logger.warning(l)
                # Detect startup listening lines
                if ("Web server is now listening" in l) or ("Listen:" in l) or ("Starting new Web server" in l):
                    if not server_detected:
                        server_detected = True
                        self.logger.info("✓ Detected web server startup!")
                        self.logger.info("⏳ Waiting for Gitea to fully initialize...")
                        # Quick readiness loop
                        for i in range(10):
                            time.sleep(1)
                            if self._is_healthy():
                                self.logger.info(f"✓ Gitea is ready and responding! (after {i + 1} seconds)")
                                return
                        self.logger.warning(
                            "Server started but health check failed after 10 attempts, continuing to wait...")

            # Check if container is still running periodically
            container_status = self.run(
                f"docker inspect {self.container} --format='{{{{.State.Status}}}}'",
                check=False
            )
            status = (container_status.stdout or "").strip()
            if status and status != "running":
                # Container stopped or in error state
                error_logs = self.run(
                    f"docker logs --tail 30 {self.container} 2>&1",
                    check=False
                )
                self.logger.error(f"Container is in '{status}' state. Last logs:")
                for l in (error_logs.stdout or "").split('\n')[-10:]:
                    if l.strip():
                        self.logger.error(l)
                raise RuntimeError(f"Container failed to start (status: {status})")

            # If nothing drained, brief sleep to avoid busy loop
            if not drained_any:
                time.sleep(0.5)

        raise TimeoutError(f"Gitea failed to become ready within {timeout} seconds")

    def _is_healthy(self):
        """Check if Gitea is responding"""
        # Try a simple HTTP GET first (less verbose)
        result = self.run(
            f"curl -s -o /dev/null -w '%{{http_code}}' http://localhost:{self.port}/",
            check=False
        )
        code = result.stdout.strip()
        # Treat any 2xx/3xx as healthy (e.g., 200 OK, 302/303 redirects)
        if code and code[0] in ("2", "3"):
            return True

        # If it failed, show debug info
        if code == "000":
            # Only show debug on first failure
            if not hasattr(self, '_health_check_debug_shown'):
                self._health_check_debug_shown = True
                self.logger.info("🔍 Connection issue detected, showing diagnostics:")

                # Check what's actually listening
                netstat_result = self.run(f"netstat -tln | grep {self.port}", check=False)
                self.logger.info(f"   Port {self.port} listeners: {netstat_result.stdout.strip() or 'none found'}")

                # Check docker port mapping
                port_result = self.run(f"docker port {self.container} 3000", check=False)
                self.logger.info(f"   Docker mapping: {port_result.stdout.strip() or 'not mapped'}")

        return False

    def _verify_health(self):
        """Verify Gitea is healthy"""
        if not self._is_healthy():
            raise RuntimeError("Gitea is not responding properly")
        self.logger.info("✅ Gitea is healthy")

    def _ensure_network(self):
        """Ensure Docker network exists and Gitea is connected to it"""
        # Create network if it doesn't exist (idempotent)
        self.run(f"docker network create {self.network} 2>/dev/null || true", check=False)

        # Connect Gitea to the network if not already connected (idempotent)
        self.run(f"docker network connect {self.network} {self.container} 2>/dev/null || true", check=False)

        self.logger.debug(f"Ensured {self.container} is connected to {self.network} network")

    # ---------- Conan helpers ----------
    def _resolve_conan_prefix(self) -> Optional[str]:
        """Determine how to run the 'conan' CLI and cache the decision.
        Returns:
            '' for direct invocation (conan in PATH),
            full sudo+login-shell prefix string for delegated execution, or
            None if Conan is not available.
        """
        if self._conan_prefix is not None:
            return self._conan_prefix

        # If running with sudo, try actual user's login shell
        if os.geteuid() == 0 and 'SUDO_USER' in os.environ:
            actual_user = os.environ['SUDO_USER']
            # Discover the user's shell
            shell_result = self.run(f"getent passwd {actual_user} | cut -d: -f7", check=False)
            user_shell = shell_result.stdout.strip() if shell_result.returncode == 0 and shell_result.stdout.strip() else "/bin/bash"
            self.logger.info(f"→ Using {actual_user}'s shell for Conan: {user_shell}")

            which_result = self.run(f"sudo -u {actual_user} {user_shell} -l -c 'which conan'", check=False)
            if which_result.returncode == 0 and which_result.stdout.strip():
                self._conan_prefix = f"sudo -u {actual_user} {user_shell} -l -c"
                self.logger.info(f"✓ Found Conan at: {which_result.stdout.strip()}")
                return self._conan_prefix
            else:
                self.logger.warning(f"⚠️ Conan not found in {actual_user}'s PATH.")
                self._conan_prefix = None
                return self._conan_prefix
        else:
            # Non-sudo case; check PATH directly
            if shutil.which("conan"):
                self._conan_prefix = ''
                return self._conan_prefix
            else:
                self.logger.warning("⚠️ Conan CLI not found in PATH.")
                self._conan_prefix = None
                return self._conan_prefix

    def _build_conan_cmd(self, inner_args: str) -> Optional[str]:
        """Build a shell command to run Conan with given inner arguments.
        Example: inner_args='remote list' => 'conan remote list' or "sudo -u user shell -l -c 'conan remote list'".
        Returns None if Conan is unavailable.
        """
        prefix = self._resolve_conan_prefix()
        if prefix is None:
            return None
        if prefix == '':
            return f"conan {inner_args}"
        # Delegate via sudo+login shell; quote the inner command
        return f"{prefix} 'conan {inner_args}'"

    def _run_conan(self, inner_args: str, check: bool = False):
        """Run a Conan subcommand using the resolved execution context.
        Returns the subprocess.CompletedProcess-like result, or a dummy object with returncode=127 if unavailable.
        """
        full_cmd = self._build_conan_cmd(inner_args)
        if full_cmd is None:
            # Construct a minimal dummy result
            class Dummy:
                returncode = 127
                stdout = ''
                stderr = 'conan: not found'
            self.logger.error("❌ Conan CLI is not available. Skipping command: conan " + inner_args)
            return Dummy()
        return self.run(full_cmd, check=check)

    def _run_conan_sensitive(self, inner_args: str, check: bool = False):
        """Run a sensitive Conan subcommand (e.g., with passwords) using the resolved execution context."""
        full_cmd = self._build_conan_cmd(inner_args)
        if full_cmd is None:
            class Dummy:
                returncode = 127
                stdout = ''
                stderr = 'conan: not found'
            self.logger.error("❌ Conan CLI is not available. Skipping sensitive command")
            return Dummy()
        return self.run(full_cmd, check=check, sensitive=True)

    def _create_user(self):
        """Create Conan user (idempotent)"""
        self.logger.info("👤 Setting up admin user...")

        # Retry a few times in case DB initialization lags behind
        attempts = 5
        for i in range(1, attempts + 1):
            # First check if user exists
            check_cmd = f"docker exec -u 1000:1000 {self.container} gitea admin user list"
            result = self.run(check_cmd, check=False)
            if result.returncode == 0 and self.user in result.stdout:
                self.logger.info(f"✅ User already exists: {self.user}")
                return

            # Try to create admin user with --admin flag
            create_cmd = f"""docker exec -u 1000:1000 {self.container} \
                gitea admin user create \
                --username {self.user} \
                --password {self.passwd} \
                --email {self.email} \
                --admin \
                --must-change-password=false"""
            create_res = self.run(create_cmd, check=False, sensitive=True)
            if create_res.returncode == 0:
                self.logger.info(f"✅ Created admin user: {self.user}")
                return
            if "already exists" in (create_res.stderr or "").lower() or "already exists" in (
                    create_res.stdout or "").lower():
                self.logger.info(f"✅ User already exists: {self.user}")
                return

            if i < attempts:
                delay = min(2 ** i, 10)
                time.sleep(delay)

        self.logger.warning(f"⚠️ Could not create user after {attempts} attempts. You may need to create it manually.")

    def _configure_conan(self):
        """Configure Conan client (idempotent)"""
        self.logger.info("🔧 Configuring Conan client on host...")

        # Ensure Conan is available and determine execution context
        if self._resolve_conan_prefix() is None:
            self.logger.warning("⚠️ Conan CLI not available on host. Skipping client configuration.")
            self.logger.info("   Note: Tests will still work using container-based Conan.")
            return

        # For host-based Conan, we still use localhost since the host can't resolve container names
        # Container-based tests will use gitea-conan-persistent directly
        conan_url = f"http://localhost:{self.port}/api/packages/{self.user}/conan"

        # Remove old remote if exists (ignore errors)
        self._run_conan("remote remove gitea-local 2>/dev/null", check=False)

        # Add Gitea as remote
        self._run_conan(f"remote add gitea-local {conan_url}")

        # Authenticate (mark as sensitive even though Conan masks password in process list)
        self._run_conan_sensitive(f"user -p {self.passwd} -r gitea-local {self.user}")

        # Enable revisions if not already
        self._run_conan("config set general.revisions_enabled=1", check=False)

        self.logger.info(f"✅ Host Conan configured with remote: gitea-local (via localhost)")
        self.logger.info(f"   Container tests will use: http://gitea-conan-persistent:{self.port}")

    def verify(self):
        """Verify everything is working"""
        self.logger.info("🔍 Verifying setup...")

        # Check container
        if not self.is_running():
            self.logger.error("❌ Container not running")
            return False

        # Check Gitea health
        if not self._is_healthy():
            self.logger.error("❌ Gitea not responding")
            return False

        # Check Conan remote
        result = self._run_conan("remote list", check=False)
        if getattr(result, 'returncode', 1) != 0 or "gitea-local" not in (getattr(result, 'stdout', '') or ''):
            self.logger.error("❌ Conan remote not configured")
            return False

        self.logger.info("✅ All systems operational")
        return True

    def info(self):
        """Print current status"""
        self.logger.info("📊 Gitea Status:")
        self.logger.info(f"   Container: {self.container}")
        self.logger.info(f"   Running: {self.is_running()}")
        self.logger.info(f"   Data dir: {self.data_dir}")
        self.logger.info(f"   URL: http://localhost:{self.port}")
        self.logger.info(f"   Conan URL: http://localhost:{self.port}/api/packages/{self.user}/conan")

        # Show disk usage
        if os.path.exists(self.data_dir):
            result = self.run(f"du -sh {self.data_dir}", check=False)
            if result.returncode == 0:
                size = result.stdout.strip().split('\t')[0]
                self.logger.info(f"   Disk usage: {size}")

    def test(self):
        """Test Conan package upload/download in a container"""
        self.logger.info("🧪 Testing Conan with Gitea (container-based test)...")

        # Ensure everything is set up
        if not self.is_running():
            self.logger.error("❌ Gitea not running. Run 'setup' first.")
            return False

        # Ensure network exists and Gitea is connected
        self._ensure_network()

        # Test package name
        test_package = "zlib/1.3.1"
        package_name = test_package.split('/')[0]  # Extract just the package name
        self.logger.info(f"   → Testing with package: {test_package}")
        self.logger.info(f"   → Running test in container on {self.network} network")

        # Run test in a container (same environment as production)
        test_cmd = f"""docker run --rm --network {self.network} conanio/gcc11 bash -ec "
            # Configure Conan to use Gitea
            conan remote add gitea-local http://gitea-conan-persistent:{self.port}/api/packages/{self.user}/conan
            conan user -p {self.passwd} -r gitea-local {self.user}
            conan config set general.revisions_enabled=1

            # Test package upload/download
            echo '→ Building {test_package} from source...'
            conan install {test_package}@ --build={test_package}

            echo '→ Uploading to Gitea...'
            conan upload '{package_name}/*' --all -r gitea-local --confirm

            echo '→ Removing local copy...'
            conan remove '{package_name}/*' -f

            echo '→ Downloading from Gitea...'
            conan install {test_package}@ -r gitea-local

            echo '✅ Container-based test successful!'
        " """

        result = self.run(test_cmd, check=False, sensitive=False)  # Temporarily show output for debugging

        if result.returncode == 0:
            self.logger.info("✅ Test successful! Package uploaded and downloaded from Gitea.")
            return True
        else:
            self.logger.error("❌ Test failed. Check the output above for details.")
            return False

    def teardown(self):
        """Stop and remove Gitea container and data"""
        self.logger.info("🛑 Tearing down Gitea...")

        # Stop and remove container
        if self.container_exists():
            self.logger.info(f"   → Stopping container: {self.container}")
            self.run(f"docker stop {self.container}", check=False)
            self.logger.info(f"   → Removing container: {self.container}")
            self.run(f"docker rm {self.container}", check=False)
        else:
            self.logger.info("   → No container to remove")

        # Remove data directory
        if os.path.exists(self.data_dir):
            self.logger.info(f"   → Removing data directory: {self.data_dir}")
            shutil.rmtree(self.data_dir)
            self.logger.info("   ✓ Data directory removed")
        else:
            self.logger.info("   → No data directory to remove")

        # Remove Docker network if it exists
        network_check = self.run(f"docker network ls --format '{{{{.Name}}}}' | grep '^{self.network}$'", check=False)
        if network_check.stdout.strip():
            self.logger.info(f"   → Removing Docker network: {self.network}")
            self.run(f"docker network rm {self.network}", check=False)
            self.logger.info("   ✓ Network removed")
        else:
            self.logger.info(f"   → No network '{self.network}' to remove")

        self.logger.info("   ✅ Teardown complete!")


# For use in GitHub Actions workflows

def main():
    parser = argparse.ArgumentParser(description='Persistent Gitea for Conan packages')
    parser.add_argument('command', choices=['setup', 'teardown', 'verify', 'info', 'test'], nargs='?', default='setup')
    parser.add_argument('--debug', action='store_true', help='Enable debug logging')
    parser.add_argument('--verbose', action='store_true', help='Enable verbose logging (info level)')
    args = parser.parse_args()

    # Temporary logging before instance creation (level will be reconfigured inside class)
    temp_level = logging.DEBUG if args.debug else (logging.INFO if args.verbose else logging.WARNING)
    logging.basicConfig(level=temp_level, format='%(asctime)s - %(levelname)s - %(filename)s:%(lineno)d - %(message)s')
    logger = logging.getLogger(__name__)

    # Auto-escalate to sudo for operations that need it
    needs_sudo = args.command in ['setup', 'teardown']
    if needs_sudo and os.geteuid() != 0:
        logger.info("📋 This operation requires sudo privileges. Re-running with sudo...")
        os.execvp('sudo', ['sudo'] + sys.argv)

    gitea = PersistentGiteaConan(debug=args.debug, verbose=args.verbose)

    try:
        if args.command == "setup":
            gitea.setup()
        elif args.command == "verify":
            sys.exit(0 if gitea.verify() else 1)
        elif args.command == "info":
            gitea.info()
        elif args.command == "test":
            sys.exit(0 if gitea.test() else 1)
        elif args.command == "teardown":
            gitea.teardown()
    except KeyboardInterrupt:
        logger.warning("Interrupted by user. Cleaning up...")
        try:
            gitea._stop_log_streaming()
        except Exception:
            pass
        sys.exit(130)


if __name__ == "__main__":
    main()
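A note on driving the script: the diff wires it up through its CLI (setup, verify, info, test, teardown, as parsed in main()). The same flow can also be sketched programmatically; the snippet below is a hypothetical sketch that assumes .ci/gitea.py has been made importable as a module named gitea, which nothing in the diff itself sets up.

    # Hypothetical driver; assumes .ci/gitea.py is importable as "gitea".
    from gitea import PersistentGiteaConan

    registry = PersistentGiteaConan(verbose=True)
    registry.setup()        # idempotent: creates or reuses the persistent container
    if registry.verify():   # container running, Gitea healthy, Conan remote present
        registry.test()     # container-based zlib upload/download round trip
    # registry.teardown()   # only when the persistent data should be discarded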
.github/workflows/build-in-docker.yml (vendored, 115 lines)
@@ -2,9 +2,9 @@ name: Build using Docker

on:
  push:
    branches: ["dev", "candidate", "release", "jshooks"]
    branches: ["dev", "candidate", "release", "ci-experiments"]
  pull_request:
    branches: ["dev", "candidate", "release", "jshooks"]
    branches: ["dev", "candidate", "release", "ci-experiments"]

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
@@ -49,39 +49,93 @@ jobs:
    run:
      working-directory: ${{ needs.checkout.outputs.checkout_path }}
    steps:
      - name: Set Cleanup Script Path
      - name: Install Python & pipx
        run: |
          echo "JOB_CLEANUP_SCRIPT=$(mktemp)" >> $GITHUB_ENV
          sudo apt update && sudo apt install -y python3 python3-pip pipx python-is-python3

      - name: Build using Docker
        run: /bin/bash release-builder.sh

      - name: Stop Container (Cleanup)
        if: always()
      - name: Install Conan
        run: |
          echo "Running cleanup script: $JOB_CLEANUP_SCRIPT"
          /bin/bash -e -x "$JOB_CLEANUP_SCRIPT"
          CLEANUP_EXIT_CODE=$?
          pipx install "conan<2.0"
          /root/.local/bin/conan --version # PATH doesn't seem to be set correctly

          if [[ "$CLEANUP_EXIT_CODE" -eq 0 ]]; then
            echo "Cleanup script succeeded."
            rm -f "$JOB_CLEANUP_SCRIPT"
            echo "Cleanup script removed."
          else
            echo "⚠️ Cleanup script failed! Keeping for debugging: $JOB_CLEANUP_SCRIPT"
          fi
      - name: Setup network and Gitea
        run: |
          # Create network for container communication (idempotent)
          docker network create conan-net 2>/dev/null || true

          # Setup Gitea
          PATH="/root/.local/bin:$PATH" python .ci/gitea.py setup --debug

          if [[ "${DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP}" == "1" ]]; then
            echo "🔍 Checking for leftover containers..."
            BUILD_CONTAINERS=$(docker ps --format '{{.Names}}' | grep '^xahaud_cached_builder' || echo "")
          # Connect Gitea to the network (idempotent)
          docker network connect conan-net gitea-conan-persistent 2>/dev/null || true

          # Verify it's connected
          docker inspect gitea-conan-persistent -f '{{range $net,$v := .NetworkSettings.Networks}}{{$net}} {{end}}'

            if [[ -n "$BUILD_CONTAINERS" ]]; then
              echo "⚠️ WARNING: Some build containers are still running"
              echo "$BUILD_CONTAINERS"
            else
              echo "✅ No build containers found"
            fi
          fi
      # - name: Test Gitea from build container
      #   run: |
      #     # Show conan-net details
      #     echo "=== Docker network 'conan-net' details ==="
      #     docker network inspect conan-net
      #
      #     # Show what networks Gitea is connected to
      #     echo "=== Gitea container networks ==="
      #     docker inspect gitea-conan-persistent -f '{{json .NetworkSettings.Networks}}' | python -m json.tool
      #
      #     # Check if DNS resolution works without adding to conan-net
      #     docker run --rm alpine nslookup gitea-conan-persistent || echo "⚠️ DNS resolution failed without conan-net"
      #
      #     docker run --rm --network conan-net alpine sh -c "
      #       # First verify connectivity works
      #       apk add --no-cache curl >/dev/null 2>&1
      #       echo 'Testing DNS resolution...'
      #       nslookup gitea-conan-persistent
      #       echo 'Testing HTTP connection...'
      #       curl -s http://gitea-conan-persistent:3000 | head -n1
      #     "
      #     docker run --rm --network conan-net conanio/gcc11 bash -xec "
      #       # Configure Conan using the resolved IP
      #       conan remote add gitea-local http://gitea-conan-persistent:3000/api/packages/conan/conan
      #       conan user -p conan-pass-2024 -r gitea-local conan
      #
      #       # Enable revisions to match the server expectation
      #       conan config set general.revisions_enabled=1
      #
      #       # Test package upload/download
      #       conan install zlib/1.3.1@ --build=zlib
      #       conan upload 'zlib/*' --all -r gitea-local --confirm
      #       conan remove 'zlib/*' -f
      #       conan install zlib/1.3.1@ -r gitea-local
      #       echo '✅ Container-to-container test successful!'# - name: Build using Docker
      #     "
      # run: /bin/bash release-builder.sh
      #
      # - name: Stop Container (Cleanup)
      #   if: always()
      #   run: |
      #     echo "Running cleanup script: $JOB_CLEANUP_SCRIPT"
      #     /bin/bash -e -x "$JOB_CLEANUP_SCRIPT"
      #     CLEANUP_EXIT_CODE=$?
      #
      #     if [[ "$CLEANUP_EXIT_CODE" -eq 0 ]]; then
      #       echo "Cleanup script succeeded."
      #       rm -f "$JOB_CLEANUP_SCRIPT"
      #       echo "Cleanup script removed."
      #     else
      #       echo "⚠️ Cleanup script failed! Keeping for debugging: $JOB_CLEANUP_SCRIPT"
      #     fi
      #
      #     if [[ "${DEBUG_BUILD_CONTAINERS_AFTER_CLEANUP}" == "1" ]]; then
      #       echo "🔍 Checking for leftover containers..."
      #       BUILD_CONTAINERS=$(docker ps --format '{{.Names}}' | grep '^xahaud_cached_builder' || echo "")
      #
      #       if [[ -n "$BUILD_CONTAINERS" ]]; then
      #         echo "⚠️ WARNING: Some build containers are still running"
      #         echo "$BUILD_CONTAINERS"
      #       else
      #         echo "✅ No build containers found"
      #       fi
      #     fi

  tests:
    runs-on: [self-hosted, vanity]
@@ -91,7 +145,7 @@ jobs:
      working-directory: ${{ needs.checkout.outputs.checkout_path }}
    steps:
      - name: Unit tests
        run: /bin/bash docker-unit-tests.sh
        run: PATH="/root/.local/bin:$PATH" python .ci/gitea.py test --debug

  cleanup:
    runs-on: [self-hosted, vanity]
@@ -101,5 +155,6 @@ jobs:
      - name: Cleanup workspace
        run: |
          CHECKOUT_PATH="${{ needs.checkout.outputs.checkout_path }}"
          PATH="/root/.local/bin:$PATH" python "${CHECKOUT_PATH}/.ci/gitea.py" teardown --debug
          echo "Cleaning workspace for ${CHECKOUT_PATH}"
          rm -rf "${{ github.workspace }}/${CHECKOUT_PATH}"
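The "Setup network and Gitea" step relies on gitea.py to block until the service answers over HTTP (any 2xx/3xx from http://localhost:3000/). A workflow step that wants its own guard before continuing could use a similar readiness probe; the sketch below is in the same spirit as _is_healthy()/_wait_for_startup(), using urllib instead of the script's curl call, and is not part of the diff.

    # Readiness probe sketch (assumed standalone helper, not from the diff).
    import time
    import urllib.error
    import urllib.request

    def wait_for_gitea(url: str = "http://localhost:3000/", timeout: int = 120) -> bool:
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with urllib.request.urlopen(url, timeout=2) as resp:
                    if 200 <= resp.status < 400:  # successful (or redirected) response
                        return True
            except (urllib.error.URLError, OSError):
                pass  # not listening yet; retry
            time.sleep(1)
        return False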
@@ -129,12 +129,6 @@ class RCLConsensus
        return mode_;
    }

    void
    setProposing()
    {
        mode_ = ConsensusMode::proposing;
    }

    /** Called before kicking off a new consensus round.

        @param prevLedger Ledger that will be prior ledger for next round
@@ -471,12 +465,6 @@ public:
        return adaptor_.mode();
    }

    void
    setProposing()
    {
        adaptor_.setProposing();
    }

    ConsensusPhase
    phase() const
    {

@@ -212,7 +212,6 @@ LedgerMaster::getCurrentLedgerIndex()
LedgerIndex
LedgerMaster::getValidLedgerIndex()
{
    std::cout << "getValidLedgerIndex: " << mValidLedgerSeq << "\n";
    return mValidLedgerSeq;
}


@@ -128,20 +128,4 @@ HashRouter::shouldRelay(uint256 const& key)
    return s.releasePeerSet();
}

void
HashRouter::setTouchedKeys(uint256 const& id, std::set<uint256>&& k)
{
    std::unique_lock<std::shared_mutex> lock(touchedKeysMutex_);
    touchedKeysMap_.insert_or_assign(id, std::move(k));
}

std::optional<std::reference_wrapper<const std::set<uint256>>>
HashRouter::getTouchedKeys(uint256 const& id)
{
    std::shared_lock<std::shared_mutex> lock(touchedKeysMutex_);
    if (auto it = touchedKeysMap_.find(id); it != touchedKeysMap_.end())
        return std::cref(it->second);
    return std::nullopt;
}

} // namespace ripple

@@ -27,8 +27,6 @@
#include <ripple/beast/container/aged_unordered_map.h>

#include <optional>
#include <set>
#include <shared_mutex>

namespace ripple {

@@ -197,12 +195,6 @@ public:
    int
    getFlags(uint256 const& key);

    void
    setTouchedKeys(uint256 const& id, std::set<uint256>&& k);

    std::optional<std::reference_wrapper<const std::set<uint256>>>
    getTouchedKeys(uint256 const& id);

    /** Determines whether the hashed item should be relayed.

        Effects:
@@ -225,9 +217,6 @@ private:

    std::mutex mutable mutex_;

    mutable std::shared_mutex touchedKeysMutex_;
    std::map<uint256, std::set<uint256>> touchedKeysMap_;

    // Stores all suppressed hashes and their expiration time
    beast::aged_unordered_map<
        uint256,

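The HashRouter hunks above carry a per-transaction touched-keys registry guarded by a reader-writer lock: setTouchedKeys() takes a std::unique_lock on the shared_mutex to insert or overwrite, while getTouchedKeys() takes a std::shared_lock to look up. The snippet below is only a rough cross-language illustration of that bookkeeping, sketched in Python with a plain mutex because the standard library has no shared lock; the names are illustrative and not part of the diff.

    import threading
    from typing import Dict, FrozenSet, Optional

    class TouchedKeysRegistry:
        """Illustrative analogue of touchedKeysMap_; not the real implementation."""

        def __init__(self) -> None:
            self._lock = threading.Lock()  # the C++ version uses std::shared_mutex
            self._map: Dict[bytes, FrozenSet[bytes]] = {}

        def set_touched_keys(self, tx_id: bytes, keys: FrozenSet[bytes]) -> None:
            with self._lock:  # exclusive, like std::unique_lock
                self._map[tx_id] = keys  # insert_or_assign semantics

        def get_touched_keys(self, tx_id: bytes) -> Optional[FrozenSet[bytes]]:
            with self._lock:  # std::shared_lock in the C++ version
                return self._map.get(tx_id)  # std::nullopt maps to None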
@@ -944,13 +944,7 @@ NetworkOPsImp::processHeartbeatTimer()
|
||||
// do we have sufficient peers? If not, we are disconnected.
|
||||
if (numPeers < minPeerCount_)
|
||||
{
|
||||
if (app_.config().NETWORK_ID == 65534)
|
||||
{
|
||||
// replay network is always considered to be connected
|
||||
// ensuring that it actually is is up to the tester
|
||||
setMode(OperatingMode::FULL);
|
||||
}
|
||||
else if (mMode != OperatingMode::DISCONNECTED)
|
||||
if (mMode != OperatingMode::DISCONNECTED)
|
||||
{
|
||||
setMode(OperatingMode::DISCONNECTED);
|
||||
JLOG(m_journal.warn())
|
||||
@@ -1803,13 +1797,6 @@ NetworkOPsImp::beginConsensus(uint256 const& networkClosed)
|
||||
{
|
||||
assert(networkClosed.isNonZero());
|
||||
|
||||
if (app_.config().NETWORK_ID == 65534)
|
||||
{
|
||||
// replay network automatically goes to proposing
|
||||
setMode(OperatingMode::FULL);
|
||||
mConsensus.setProposing();
|
||||
}
|
||||
|
||||
auto closingInfo = m_ledgerMaster.getCurrentLedger()->info();
|
||||
|
||||
JLOG(m_journal.info()) << "Consensus time for #" << closingInfo.seq
|
||||
|
||||
@@ -29,7 +29,6 @@
|
||||
#include <ripple/protocol/TxMeta.h>
|
||||
#include <boost/optional.hpp>
|
||||
#include <optional>
|
||||
#include <set>
|
||||
#include <variant>
|
||||
|
||||
namespace ripple {
|
||||
@@ -407,8 +406,6 @@ private:
|
||||
std::shared_ptr<STTx const> mTransaction;
|
||||
Application& mApp;
|
||||
beast::Journal j_;
|
||||
|
||||
std::set<uint256> keysTouched;
|
||||
};
|
||||
|
||||
} // namespace ripple
|
||||
|
||||
@@ -30,7 +30,6 @@
|
||||
#include <boost/circular_buffer.hpp>
|
||||
#include <boost/intrusive/set.hpp>
|
||||
#include <optional>
|
||||
#include <set>
|
||||
#include <vector>
|
||||
|
||||
namespace ripple {
|
||||
@@ -106,13 +105,13 @@ public:
|
||||
FeeLevel64 minimumEscalationMultiplier = baseLevel * 500;
|
||||
/// Minimum number of transactions to allow into the ledger
|
||||
/// before escalation, regardless of the prior ledger's size.
|
||||
std::uint32_t minimumTxnInLedger = 5000;
|
||||
std::uint32_t minimumTxnInLedger = 32;
|
||||
/// Like @ref minimumTxnInLedger for standalone mode.
|
||||
/// Primarily so that tests don't need to worry about queuing.
|
||||
std::uint32_t minimumTxnInLedgerSA = 1000;
|
||||
/// Number of transactions per ledger that fee escalation "works
|
||||
/// towards".
|
||||
std::uint32_t targetTxnInLedger = 10000;
|
||||
std::uint32_t targetTxnInLedger = 1000;
|
||||
/** Optional maximum allowed value of transactions per ledger before
|
||||
fee escalation kicks in. By default, the maximum is an emergent
|
||||
property of network, validator, and consensus performance. This
|
||||
@@ -742,7 +741,6 @@ private:
|
||||
FeeMetrics::Snapshot const& metricsSnapshot,
|
||||
std::lock_guard<std::mutex> const& lock) const;
|
||||
|
||||
public:
|
||||
// Helper function for TxQ::apply. If a transaction's fee is high enough,
|
||||
// attempt to directly apply that transaction to the ledger.
|
||||
std::optional<std::pair<TER, bool>>
|
||||
@@ -753,7 +751,6 @@ public:
|
||||
ApplyFlags flags,
|
||||
beast::Journal j);
|
||||
|
||||
private:
|
||||
// Helper function that removes a replaced entry in _byFee.
|
||||
std::optional<TxQAccount::TxMap::iterator>
|
||||
removeFromByFee(
|
||||
|
||||
@@ -34,270 +34,8 @@
|
||||
#include <ripple/protocol/jss.h>
|
||||
#include <ripple/rpc/CTID.h>
|
||||
|
||||
#include <algorithm>
|
||||
#include <atomic>
|
||||
#include <chrono>
|
||||
#include <iomanip>
|
||||
#include <thread>
|
||||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
#define ENABLE_PERFORMANCE_TRACKING 0
|
||||
|
||||
namespace ripple {
|
||||
|
||||
#if ENABLE_PERFORMANCE_TRACKING
|
||||
|
||||
// Performance monitoring statistics
|
||||
namespace {
|
||||
// Design: Uses thread-local storage for most stats to avoid contention.
|
||||
// Only global concurrency tracking uses atomics, as it requires cross-thread
|
||||
// visibility. Statistics are aggregated using dirty reads for minimal
|
||||
// performance impact.
|
||||
|
||||
// Thread-local statistics - no synchronization needed!
|
||||
struct ThreadLocalStats
|
||||
{
|
||||
uint64_t executionCount = 0;
|
||||
uint64_t totalTimeNanos = 0;
|
||||
uint64_t totalKeys = 0;
|
||||
uint32_t currentlyExecuting = 0; // 0 or 1 for this thread
|
||||
std::thread::id threadId = std::this_thread::get_id();
|
||||
|
||||
// For global registry
|
||||
ThreadLocalStats* next = nullptr;
|
||||
|
||||
ThreadLocalStats();
|
||||
~ThreadLocalStats();
|
||||
};
|
||||
|
||||
// Global registry of thread-local stats (only modified during thread
|
||||
// creation/destruction)
|
||||
struct GlobalRegistry
|
||||
{
|
||||
std::atomic<ThreadLocalStats*> head{nullptr};
|
||||
std::atomic<uint64_t> globalExecutions{0};
|
||||
std::atomic<uint32_t> globalConcurrent{
|
||||
0}; // Current global concurrent executions
|
||||
std::atomic<uint32_t> maxGlobalConcurrent{0}; // Max observed
|
||||
|
||||
// For tracking concurrency samples
|
||||
std::vector<uint32_t> concurrencySamples;
|
||||
std::mutex sampleMutex; // Only used during printing
|
||||
|
||||
std::chrono::steady_clock::time_point startTime =
|
||||
std::chrono::steady_clock::now();
|
||||
std::chrono::steady_clock::time_point lastPrintTime =
|
||||
std::chrono::steady_clock::now();
|
||||
|
||||
static constexpr auto PRINT_INTERVAL = std::chrono::seconds(10);
|
||||
static constexpr uint64_t PRINT_EVERY_N_CALLS = 1000;
|
||||
|
||||
void
|
||||
registerThread(ThreadLocalStats* stats)
|
||||
{
|
||||
// Add to linked list atomically
|
||||
ThreadLocalStats* oldHead = head.load();
|
||||
do
|
||||
{
|
||||
stats->next = oldHead;
|
||||
} while (!head.compare_exchange_weak(oldHead, stats));
|
||||
}
|
||||
|
||||
void
|
||||
unregisterThread(ThreadLocalStats* stats)
|
||||
{
|
||||
// In production, you'd want proper removal logic
|
||||
// For this example, we'll just leave it in the list
|
||||
// (threads typically live for the process lifetime anyway)
|
||||
}
|
||||
|
||||
void
|
||||
checkAndPrint(uint64_t localCount)
|
||||
{
|
||||
// Update approximate global count
|
||||
uint64_t approxGlobal =
|
||||
globalExecutions.fetch_add(localCount) + localCount;
|
||||
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
if (approxGlobal % PRINT_EVERY_N_CALLS < localCount ||
|
||||
(now - lastPrintTime) >= PRINT_INTERVAL)
|
||||
{
|
||||
// Only one thread prints at a time
|
||||
static std::atomic<bool> printing{false};
|
||||
bool expected = false;
|
||||
if (printing.compare_exchange_strong(expected, true))
|
||||
{
|
||||
// Double-check timing
|
||||
now = std::chrono::steady_clock::now();
|
||||
if ((now - lastPrintTime) >= PRINT_INTERVAL)
|
||||
{
|
||||
printStats();
|
||||
lastPrintTime = now;
|
||||
}
|
||||
printing = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
printStats()
|
||||
{
|
||||
// Dirty read of all thread-local stats
|
||||
uint64_t totalExecs = 0;
|
||||
uint64_t totalNanos = 0;
|
||||
uint64_t totalKeyCount = 0;
|
||||
uint32_t currentConcurrent = globalConcurrent.load();
|
||||
uint32_t maxConcurrent = maxGlobalConcurrent.load();
|
||||
std::unordered_map<
|
||||
std::thread::id,
|
||||
std::tuple<uint64_t, uint64_t, uint64_t>>
|
||||
threadData;
|
||||
|
||||
// Walk the linked list of thread-local stats
|
||||
ThreadLocalStats* current = head.load();
|
||||
while (current)
|
||||
{
|
||||
// Dirty reads - no synchronization!
|
||||
uint64_t execs = current->executionCount;
|
||||
if (execs > 0)
|
||||
{
|
||||
uint64_t nanos = current->totalTimeNanos;
|
||||
uint64_t keys = current->totalKeys;
|
||||
|
||||
totalExecs += execs;
|
||||
totalNanos += nanos;
|
||||
totalKeyCount += keys;
|
||||
|
||||
threadData[current->threadId] = {execs, nanos, keys};
|
||||
}
|
||||
current = current->next;
|
||||
}
|
||||
|
||||
if (totalExecs == 0)
|
||||
return;
|
||||
|
||||
double avgTimeUs =
|
||||
static_cast<double>(totalNanos) / totalExecs / 1000.0;
|
||||
double avgKeys = static_cast<double>(totalKeyCount) / totalExecs;
|
||||
double totalTimeMs = static_cast<double>(totalNanos) / 1000000.0;
|
||||
|
||||
// Calculate wall clock time elapsed
|
||||
auto now = std::chrono::steady_clock::now();
|
||||
auto wallTimeMs = std::chrono::duration_cast<std::chrono::milliseconds>(
|
||||
now - startTime)
|
||||
.count();
|
||||
double effectiveParallelism = wallTimeMs > 0
|
||||
? totalTimeMs / static_cast<double>(wallTimeMs)
|
||||
: 0.0;
|
||||
|
||||
std::cout
|
||||
<< "\n=== Transaction::tryDirectApply Performance Stats ===\n";
|
||||
std::cout << "Total executions: ~" << totalExecs << " (dirty read)\n";
|
||||
std::cout << "Wall clock time: " << wallTimeMs << " ms\n";
|
||||
std::cout << "Total CPU time: " << std::fixed << std::setprecision(2)
|
||||
<< totalTimeMs << " ms\n";
|
||||
std::cout << "Effective parallelism: " << std::fixed
|
||||
<< std::setprecision(2) << effectiveParallelism << "x\n";
|
||||
std::cout << "Average time: " << std::fixed << std::setprecision(2)
|
||||
<< avgTimeUs << " μs\n";
|
||||
std::cout << "Average keys touched: " << std::fixed
|
||||
<< std::setprecision(2) << avgKeys << "\n";
|
||||
std::cout << "Current concurrent executions: " << currentConcurrent
|
||||
<< "\n";
|
||||
std::cout << "Max concurrent observed: " << maxConcurrent << "\n";
|
||||
std::cout << "Active threads: " << threadData.size() << "\n";
|
||||
|
||||
std::cout << "Thread distribution:\n";
|
||||
|
||||
// Sort threads by total time spent (descending)
|
||||
std::vector<std::pair<
|
||||
std::thread::id,
|
||||
std::tuple<uint64_t, uint64_t, uint64_t>>>
|
||||
sortedThreads(threadData.begin(), threadData.end());
|
||||
std::sort(
|
||||
sortedThreads.begin(),
|
||||
sortedThreads.end(),
|
||||
[](const auto& a, const auto& b) {
|
||||
return std::get<1>(a.second) >
|
||||
std::get<1>(b.second); // Sort by time
|
||||
});
|
||||
|
||||
for (const auto& [tid, data] : sortedThreads)
|
||||
{
|
||||
auto [count, time, keys] = data;
|
||||
double percentage =
|
||||
(static_cast<double>(count) / totalExecs) * 100.0;
|
||||
double avgThreadTimeUs = static_cast<double>(time) / count / 1000.0;
|
||||
double totalThreadTimeMs = static_cast<double>(time) / 1000000.0;
|
||||
double timePercentage =
|
||||
(static_cast<double>(time) / totalNanos) * 100.0;
|
||||
std::cout << " Thread " << tid << ": " << count << " executions ("
|
||||
<< std::fixed << std::setprecision(1) << percentage
|
||||
<< "%), total " << std::setprecision(2)
|
||||
<< totalThreadTimeMs << " ms (" << std::setprecision(1)
|
||||
<< timePercentage << "% of time), avg "
|
||||
<< std::setprecision(2) << avgThreadTimeUs << " μs\n";
|
||||
}
|
||||
|
||||
std::cout << "Hardware concurrency: "
|
||||
<< std::thread::hardware_concurrency() << "\n";
|
||||
std::cout << "===================================================\n\n";
|
||||
std::cout.flush();
|
||||
}
|
||||
};

static GlobalRegistry globalRegistry;

// Thread-local instance
thread_local ThreadLocalStats tlStats;

// Constructor/destructor for thread registration
ThreadLocalStats::ThreadLocalStats()
{
globalRegistry.registerThread(this);
}

ThreadLocalStats::~ThreadLocalStats()
{
globalRegistry.unregisterThread(this);
}

// RAII class to track concurrent executions (global)
class ConcurrentExecutionTracker
{
// Note: This introduces minimal atomic contention to track true global
// concurrency. The alternative would miss concurrent executions between
// print intervals.
public:
ConcurrentExecutionTracker()
{
tlStats.currentlyExecuting = 1;

// Update global concurrent count
uint32_t current = globalRegistry.globalConcurrent.fetch_add(1) + 1;

// Update max if needed (only contends when setting new maximum)
uint32_t currentMax = globalRegistry.maxGlobalConcurrent.load();
while (current > currentMax &&
!globalRegistry.maxGlobalConcurrent.compare_exchange_weak(
currentMax, current))
{
// Loop until we successfully update or current is no longer >
// currentMax
}
}

~ConcurrentExecutionTracker()
{
tlStats.currentlyExecuting = 0;
globalRegistry.globalConcurrent.fetch_sub(1);
}
};
} // namespace

#endif // ENABLE_PERFORMANCE_TRACKING
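The max-concurrency bookkeeping above uses the usual lock-free "record the maximum" idiom around compare_exchange_weak. A self-contained sketch of just that idiom, with illustrative names rather than the tracker's members:

// Sketch of the compare-and-swap maximum update used by the tracker above.
#include <atomic>
#include <cstdint>

std::atomic<std::uint32_t> maxSeen{0};

void recordSample(std::uint32_t current)
{
    std::uint32_t observedMax = maxSeen.load();
    // Retry only while our sample still exceeds the stored maximum;
    // on failure compare_exchange_weak reloads observedMax for us.
    while (current > observedMax &&
           !maxSeen.compare_exchange_weak(observedMax, current))
    {
    }
}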

Transaction::Transaction(
std::shared_ptr<STTx const> const& stx,
std::string& reason,
@@ -307,38 +45,6 @@ Transaction::Transaction(
try
{
mTransactionID = mTransaction->getTransactionID();

OpenView sandbox(*app.openLedger().current());

sandbox.getAndResetKeysTouched();

ApplyFlags flags{0};

#if ENABLE_PERFORMANCE_TRACKING
ConcurrentExecutionTracker concurrentTracker;
auto startTime = std::chrono::steady_clock::now();
#endif

if (auto directApplied =
app.getTxQ().tryDirectApply(app, sandbox, stx, flags, j_))
keysTouched = sandbox.getAndResetKeysTouched();

#if ENABLE_PERFORMANCE_TRACKING
auto endTime = std::chrono::steady_clock::now();
auto elapsedNanos =
std::chrono::duration_cast<std::chrono::nanoseconds>(
endTime - startTime)
.count();

tlStats.executionCount++;
tlStats.totalTimeNanos += elapsedNanos;
tlStats.totalKeys += keysTouched.size();

if (tlStats.executionCount % 100 == 0)
{
globalRegistry.checkAndPrint(100);
}
#endif
}
catch (std::exception& e)
{
@@ -30,7 +30,6 @@
#include <algorithm>
#include <limits>
#include <numeric>
#include <set>

namespace ripple {

@@ -740,20 +739,10 @@ TxQ::apply(
STAmountSO stAmountSO{view.rules().enabled(fixSTAmountCanonicalize)};
NumberSO stNumberSO{view.rules().enabled(fixUniversalNumber)};

auto const transactionID = tx->getTransactionID();

// See if the transaction paid a high enough fee that it can go straight
// into the ledger.

view.getAndResetKeysTouched();
if (auto directApplied = tryDirectApply(app, view, tx, flags, j))
{
app.getHashRouter().setTouchedKeys(
transactionID, view.getAndResetKeysTouched());
return *directApplied;
}

return {telCAN_NOT_QUEUE, false};

// If we get past tryDirectApply() without returning then we expect
// one of the following to occur:
@@ -769,47 +758,6 @@ TxQ::apply(
if (!isTesSuccess(pfresult.ter))
return {pfresult.ter, false};

bool const isReplayNetwork = (app.config().NETWORK_ID == 65534);

if (isReplayNetwork)
{
// in the replay network everything is always queued no matter what

std::lock_guard lock(mutex_);
auto const metricsSnapshot = feeMetrics_.getSnapshot();
auto const feeLevelPaid =
getRequiredFeeLevel(view, flags, metricsSnapshot, lock);

auto const account = (*tx)[sfAccount];
AccountMap::iterator accountIter = byAccount_.find(account);
bool const accountIsInQueue = accountIter != byAccount_.end();

if (!accountIsInQueue)
{
// Create a new TxQAccount object and add the byAccount lookup.
bool created;
std::tie(accountIter, created) =
byAccount_.emplace(account, TxQAccount(tx));
(void)created;
assert(created);
}

flags &= ~tapRETRY;

auto& candidate = accountIter->second.add(
{tx, transactionID, feeLevelPaid, flags, pfresult});

// Then index it into the byFee lookup.
byFee_.insert(candidate);
JLOG(j_.debug()) << "Added transaction " << candidate.txID
<< " with result " << transToken(pfresult.ter)
<< " from " << (accountIsInQueue ? "existing" : "new")
<< " account " << candidate.account << " to queue."
<< " Flags: " << flags;

return {terQUEUED, false};
}

// If the account is not currently in the ledger, don't queue its tx.
auto const account = (*tx)[sfAccount];
Keylet const accountKey{keylet::account(account)};
@@ -893,6 +841,7 @@ TxQ::apply(
// is allowed in the TxQ:
// 1. If the account's queue is empty or
// 2. If the blocker replaces the only entry in the account's queue.
auto const transactionID = tx->getTransactionID();
if (pfresult.consequences.isBlocker())
{
if (acctTxCount > 1)
@@ -1199,11 +1148,11 @@ TxQ::apply(
(potentialTotalSpend == XRPAmount{0} &&
multiTxn->applyView.fees().base == 0));
sleBump->setFieldAmount(sfBalance, balance - potentialTotalSpend);
// The transaction's sequence/ticket will be valid when the
// other transactions in the queue have been processed. If the
// tx has a sequence, set the account to match it. If it has a
// ticket, use the next queueable sequence, which is the closest
// approximation to the most successful case.
// The transaction's sequence/ticket will be valid when the other
// transactions in the queue have been processed. If the tx has a
// sequence, set the account to match it. If it has a ticket, use
// the next queueable sequence, which is the closest approximation
// to the most successful case.
sleBump->at(sfSequence) = txSeqProx.isSeq()
? txSeqProx.value()
: nextQueuableSeqImpl(sleAccount, lock).value();
@@ -1258,8 +1207,6 @@ TxQ::apply(
{
OpenView sandbox(open_ledger, &view, view.rules());

sandbox.getAndResetKeysTouched();

auto result = tryClearAccountQueueUpThruTx(
app,
sandbox,
@@ -1272,10 +1219,6 @@ TxQ::apply(
flags,
metricsSnapshot,
j);

app.getHashRouter().setTouchedKeys(
transactionID, sandbox.getAndResetKeysTouched());

if (result.second)
{
sandbox.apply(view);
@@ -1714,16 +1657,11 @@ TxQ::accept(Application& app, OpenView& view)
JLOG(j_.trace()) << "Applying queued transaction "
<< candidateIter->txID << " to open ledger.";

view.getAndResetKeysTouched();

auto const [txnResult, didApply] =
candidateIter->apply(app, view, j_);

if (didApply)
{
app.getHashRouter().setTouchedKeys(
candidateIter->txID, view.getAndResetKeysTouched());

// Remove the candidate from the queue
JLOG(j_.debug())
<< "Queued transaction " << candidateIter->txID
@@ -1930,15 +1868,13 @@ TxQ::tryDirectApply(
const bool isFirstImport = !sleAccount &&
view.rules().enabled(featureImport) && tx->getTxnType() == ttIMPORT;

bool const isReplayNetwork = (app.config().NETWORK_ID == 65534);

// Don't attempt to direct apply if the account is not in the ledger.
if (!sleAccount && !isFirstImport && !isReplayNetwork)
if (!sleAccount && !isFirstImport)
return {};

std::optional<SeqProxy> txSeqProx;

if (!isFirstImport && !isReplayNetwork)
if (!isFirstImport)
{
SeqProxy const acctSeqProx =
SeqProxy::sequence((*sleAccount)[sfSequence]);
@@ -1951,7 +1887,7 @@ TxQ::tryDirectApply(
}

FeeLevel64 const requiredFeeLevel =
(isFirstImport || isReplayNetwork) ? FeeLevel64{0} : [this, &view, flags]() {
isFirstImport ? FeeLevel64{0} : [this, &view, flags]() {
std::lock_guard lock(mutex_);
return getRequiredFeeLevel(
view, flags, feeMetrics_.getSnapshot(), lock);
@@ -1961,7 +1897,7 @@ TxQ::tryDirectApply(
// transaction straight into the ledger.
FeeLevel64 const feeLevelPaid = getFeeLevelPaid(view, *tx);

if (feeLevelPaid >= requiredFeeLevel || isReplayNetwork)
if (feeLevelPaid >= requiredFeeLevel)
{
// Attempt to apply the transaction directly.
auto const transactionID = tx->getTransactionID();
@@ -458,24 +458,11 @@ Change::activateXahauGenesis()
bool const isTest =
(ctx_.tx.getFlags() & tfTestSuite) && ctx_.app.config().standalone();

// RH NOTE: we'll only configure xahau governance structure on certain
// network ids

const auto nid = ctx_.app.config().NETWORK_ID;

if (nid >= 65520)
{
// networks 65520 - 65535 are also configured as xahau gov
}
else if (isTest)
{
// test is configured like this too
}
else if (nid / 10 == 2133)
{
// networks 2133X are the valid xahau prod dev and testnets
}
else
// RH NOTE: we'll only configure xahau governance structure on networks that
// begin with 2133... so production xahau: 21337 and its testnet 21338
// with 21330-21336 and 21339 also valid and reserved for dev nets etc.
// all other Network IDs will be conventionally configured.
if ((ctx_.app.config().NETWORK_ID / 10) != 2133 && !isTest)
return;

auto [ng_entries, l1_entries, l2_entries, gov_params] =

@@ -167,9 +167,6 @@ Import::preflight(PreflightContext const& ctx)
if (!xpop)
return temMALFORMED;

if (ctx.app.config().NETWORK_ID == 65534 /* replay network */)
return tesSUCCESS;

// we will check if we recognise the vl key in preclaim because it may be
// from on-ledger object
std::optional<PublicKey> masterVLKey;
@@ -273,9 +270,7 @@ Import::preflight(PreflightContext const& ctx)
return temMALFORMED;
}

const auto nid = ctx.app.config().NETWORK_ID;
if (stpTrans->getFieldU32(sfOperationLimit) != nid &&
nid != 65534 /* replay network */)
if (stpTrans->getFieldU32(sfOperationLimit) != ctx.app.config().NETWORK_ID)
{
JLOG(ctx.j.warn()) << "Import: Wrong network ID for OperationLimit in "
"inner txn. outer txid: "
@@ -1312,8 +1307,8 @@ Import::doApply()
view().rules().enabled(featureXahauGenesis)
? view().info().parentCloseTime.time_since_epoch().count()
: view().rules().enabled(featureDeletableAccounts)
? view().seq()
: 1};
? view().seq()
: 1};

sle = std::make_shared<SLE>(keylet::account(id));
sle->setAccountID(sfAccount, id);
@@ -67,16 +67,11 @@ preflight0(PreflightContext const& ctx)
else
{
// new networks both require the field to be present and require it
// to match, except for some special networks

if (nodeNID == 65534 /* replay network */)
{
// on the replay network any other network's transactions can be
// replayed; last ledger sequence is also ignored on this network
}
else if (!txNID)
// to match
if (!txNID)
return telREQUIRES_NETWORK_ID;
else if (*txNID != nodeNID)

if (*txNID != nodeNID)
return telWRONG_NETWORK;
}
}
@@ -120,11 +115,8 @@ preflight1(PreflightContext const& ctx)
auto const fee = ctx.tx.getFieldAmount(sfFee);
if (!fee.native() || fee.negative() || !isLegalAmount(fee.xrp()))
{
if (ctx.app.config().NETWORK_ID != 65534 /* replay network */)
{
JLOG(ctx.j.debug()) << "preflight1: invalid fee";
return temBAD_FEE;
}
JLOG(ctx.j.debug()) << "preflight1: invalid fee";
return temBAD_FEE;
}

// if a hook emitted this transaction we bypass signature checks
@@ -440,10 +432,6 @@ Transactor::minimumFee(
TER
Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee)
{
// on the replay network fees are unimportant
if (ctx.app.config().NETWORK_ID == 65534 /* replay network */)
return tesSUCCESS;

if (!ctx.tx[sfFee].native())
return temBAD_FEE;

@@ -485,7 +473,6 @@ Transactor::checkFee(PreclaimContext const& ctx, XRPAmount baseFee)
"a fee and an existing account.";
}
}
std::cout << "transactor 485 NO_ACCOUNT\n";
return terNO_ACCOUNT;
}

@@ -557,7 +544,6 @@ Transactor::checkSeqProxy(
JLOG(j.trace())
<< "applyTransaction: delay: source account does not exist "
<< toBase58(id);
std::cout << "transactor 557 NO_ACCOUNT\n";
return terNO_ACCOUNT;
}

@@ -644,7 +630,6 @@ Transactor::checkPriorTxAndLastLedger(PreclaimContext const& ctx)
JLOG(ctx.j.trace())
<< "applyTransaction: delay: source account does not exist "
<< toBase58(id);
std::cout << "transactor 644 NO_ACCOUNT\n";
return terNO_ACCOUNT;
}

@@ -656,18 +641,9 @@ Transactor::checkPriorTxAndLastLedger(PreclaimContext const& ctx)
return tefWRONG_PRIOR;
}

uint32_t nodeNID = ctx.app.config().NETWORK_ID;

if (ctx.tx.isFieldPresent(sfLastLedgerSequence) &&
(ctx.view.seq() > ctx.tx.getFieldU32(sfLastLedgerSequence)))
{
if (ctx.app.config().NETWORK_ID == 65534)
{
// on the replay network lls is ignored to allow txns to be replayed
}
else
return tefMAX_LEDGER;
}
return tefMAX_LEDGER;

if (ctx.view.txExists(ctx.tx.getTransactionID()))
return tefALREADY;
@@ -802,14 +778,12 @@ Transactor::apply()

// If the transactor requires a valid account and the transaction doesn't
// list one, preflight will have already flagged a failure.
auto sle = view().peek(keylet::account(account_));

const bool isReplayNetwork = (ctx_.app.config().NETWORK_ID == 65534);
auto const sle = view().peek(keylet::account(account_));

// sle must exist except for transactions
// that allow zero account. (and ttIMPORT)
assert(
sle != nullptr || account_ == beast::zero || isReplayNetwork ||
sle != nullptr || account_ == beast::zero ||
view().rules().enabled(featureImport) &&
ctx_.tx.getTxnType() == ttIMPORT &&
!ctx_.tx.isFieldPresent(sfIssuer));
@@ -832,39 +806,6 @@ Transactor::apply()

view().update(sle);
}
else if (isReplayNetwork)
{
// create missing acc for replay network
// Create the account.
std::uint32_t const seqno{
view().rules().enabled(featureXahauGenesis)
? view().info().parentCloseTime.time_since_epoch().count()
: view().rules().enabled(featureDeletableAccounts)
? view().seq()
: 1};

sle = std::make_shared<SLE>(keylet::account(account_));
sle->setAccountID(sfAccount, account_);

sle->setFieldU32(sfSequence, seqno);
sle->setFieldU32(sfOwnerCount, 0);

if (view().exists(keylet::fees()) &&
view().rules().enabled(featureXahauGenesis))
{
auto sleFees = view().peek(keylet::fees());
uint64_t accIdx = sleFees->isFieldPresent(sfAccountCount)
? sleFees->getFieldU64(sfAccountCount)
: 0;
sle->setFieldU64(sfAccountIndex, accIdx);
sleFees->setFieldU64(sfAccountCount, accIdx + 1);
view().update(sleFees);
}

// we'll fix this up at the end
sle->setFieldAmount(sfBalance, STAmount{XRPAmount{100}});
view().insert(sle);
}

return doApply();
}
@@ -887,7 +828,7 @@ Transactor::checkSign(PreclaimContext const& ctx)

// wildcard network gets a free pass on all signatures
if (ctx.tx.isFieldPresent(sfNetworkID) &&
ctx.tx.getFieldU32(sfNetworkID) >= 65534)
ctx.tx.getFieldU32(sfNetworkID) == 65535)
return tesSUCCESS;

// pass ttIMPORTs, their signatures are checked at the preflight against the
@@ -921,18 +862,7 @@ Transactor::checkSingleSign(PreclaimContext const& ctx)
auto const sleAccount = ctx.view.read(keylet::account(idAccount));

if (!sleAccount)
{
std::cout << "transactor 922 NO_ACCOUNT\n";

if (ctx.app.config().NETWORK_ID == 65534)
{
// replay network allows transactions to create missing accounts
// implicitly and in this event we will just pass the txn
return tesSUCCESS;
}
else
return terNO_ACCOUNT;
}
return terNO_ACCOUNT;

bool const isMasterDisabled = sleAccount->isFlag(lsfDisableMaster);

@@ -1998,9 +1928,7 @@ Transactor::operator()()
{
// Check invariants: if `tecINVARIANT_FAILED` is not returned, we can
// proceed to apply the tx

if (ctx_.app.config().NETWORK_ID != 65534)
result = ctx_.checkInvariants(result, fee);
result = ctx_.checkInvariants(result, fee);

if (result == tecINVARIANT_FAILED)
{

@@ -199,19 +199,14 @@ invoke_preclaim(PreclaimContext const& ctx)
// list one, preflight will have already flagged a failure.
auto const id = ctx.tx.getAccountID(sfAccount);

bool const isReplayNetwork = (ctx.app.config().NETWORK_ID == 65534);

if (id != beast::zero)
{
TER result = isReplayNetwork
? tesSUCCESS
: T::checkSeqProxy(ctx.view, ctx.tx, ctx.j);
TER result = T::checkSeqProxy(ctx.view, ctx.tx, ctx.j);

if (!isTesSuccess(result))
return result;

if (!isReplayNetwork)
result = T::checkPriorTxAndLastLedger(ctx);
result = T::checkPriorTxAndLastLedger(ctx);

if (!isTesSuccess(result))
return result;
@@ -49,7 +49,7 @@ LogicError(std::string const& s) noexcept
{
JLOG(debugLog().fatal()) << s;
std::cerr << "Logic error: " << s << std::endl;
//detail::accessViolation();
detail::accessViolation();
}

} // namespace ripple

@@ -99,12 +99,6 @@ private:
bool open_ = true;

public:
std::set<uint256>
getAndResetKeysTouched()
{
return items_.getAndResetKeysTouched();
}

OpenView() = delete;
OpenView&
operator=(OpenView&&) = delete;

@@ -35,9 +35,6 @@ namespace detail {
// Helper class that buffers raw modifications
class RawStateTable
{
private:
mutable std::set<uint256> keysTouched_;

public:
using key_type = ReadView::key_type;
// Initial size for the monotonic_buffer_resource used for allocations
@@ -101,20 +98,6 @@ public:
std::unique_ptr<ReadView::sles_type::iter_base>
slesUpperBound(ReadView const& base, uint256 const& key) const;

// each time a key is read or written it will be placed in the keysTouched_
// set.
std::set<uint256>
getAndResetKeysTouched()
{
std::set<uint256> out;
out.swap(keysTouched_);
// std::cout << "--------------\n";
// for (auto const& k : out)
//     std::cout << "getAndResetKeysTouched: " << to_string(k) <<
//     "\n";
return out;
}

private:
enum class Action {
erase,
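The comment above describes the intent: every read or write records its key, and getAndResetKeysTouched() hands the accumulated set back while emptying it. A simplified, self-contained sketch of that swap-and-reset pattern (types reduced to plain integers; this is not the RawStateTable interface itself):

// Simplified sketch of the touched-key bookkeeping described above.
#include <cstdint>
#include <set>

struct KeyTracker
{
    std::set<std::uint64_t> keysTouched;  // stands in for std::set<uint256>

    void onAccess(std::uint64_t key)
    {
        keysTouched.insert(key);          // record every key read or written
    }

    std::set<std::uint64_t> getAndReset()
    {
        std::set<std::uint64_t> out;
        out.swap(keysTouched);            // hand back the set, leave the tracker empty
        return out;
    }
};

In the diff above, TxQ follows the same cycle one level up: call view.getAndResetKeysTouched() to discard stale keys, apply the transaction, then pass the freshly harvested set to app.getHashRouter().setTouchedKeys().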
@@ -263,9 +263,6 @@ OpenView::rawTxInsert(
std::shared_ptr<Serializer const> const& txn,
std::shared_ptr<Serializer const> const& metaData)
{
if (txExists(key))
return;

auto const result = txs_.emplace(
std::piecewise_construct,
std::forward_as_tuple(key),

@@ -173,8 +173,6 @@ RawStateTable::apply(RawView& to) const
to.rawReplace(item.sle);
break;
}

keysTouched_.emplace(elem.first);
}
}

@@ -182,9 +180,6 @@ bool
RawStateTable::exists(ReadView const& base, Keylet const& k) const
{
assert(k.key.isNonZero());

keysTouched_.insert(k.key);

auto const iter = items_.find(k.key);
if (iter == items_.end())
return base.exists(k);
@@ -232,18 +227,12 @@ RawStateTable::succ(
// what we got from the parent.
if (last && next >= last)
return std::nullopt;

if (next.has_value())
keysTouched_.insert(*next);

return next;
}

void
RawStateTable::erase(std::shared_ptr<SLE> const& sle)
{
keysTouched_.insert(sle->key());

// The base invariant is checked during apply
auto const result = items_.emplace(
std::piecewise_construct,
@@ -270,7 +259,6 @@ RawStateTable::erase(std::shared_ptr<SLE> const& sle)
void
RawStateTable::insert(std::shared_ptr<SLE> const& sle)
{
keysTouched_.insert(sle->key());
auto const result = items_.emplace(
std::piecewise_construct,
std::forward_as_tuple(sle->key()),
@@ -296,7 +284,6 @@ RawStateTable::insert(std::shared_ptr<SLE> const& sle)
void
RawStateTable::replace(std::shared_ptr<SLE> const& sle)
{
keysTouched_.insert(sle->key());
auto const result = items_.emplace(
std::piecewise_construct,
std::forward_as_tuple(sle->key()),
@@ -319,7 +306,6 @@ RawStateTable::replace(std::shared_ptr<SLE> const& sle)
std::shared_ptr<SLE const>
RawStateTable::read(ReadView const& base, Keylet const& k) const
{
keysTouched_.insert(k.key);
auto const iter = items_.find(k.key);
if (iter == items_.end())
return base.read(k);

@@ -302,7 +302,7 @@ STTx::checkSingleSign(RequireFullyCanonicalSig requireCanonicalSig) const

// wildcard network gets a free pass on all signatures
bool const isWildcardNetwork =
isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) >= 65534;
isFieldPresent(sfNetworkID) && getFieldU32(sfNetworkID) == 65535;

bool validSig = false;
try
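As context for the two predicates compared above: elsewhere in this diff 65534 is described as the replay network and 65535 as the wildcard network, so `>= 65534` admits both while `== 65535` admits only the wildcard network. A tiny sketch of the distinction:

// Sketch of the network-ID predicates above (IDs as described in this diff's comments).
#include <cstdint>

constexpr std::uint32_t replayNetworkId = 65534;
constexpr std::uint32_t wildcardNetworkId = 65535;

constexpr bool bypassesSigCheckBroad(std::uint32_t nid)  { return nid >= 65534; }  // replay + wildcard
constexpr bool bypassesSigCheckNarrow(std::uint32_t nid) { return nid == 65535; }  // wildcard only

static_assert(bypassesSigCheckBroad(replayNetworkId) && !bypassesSigCheckNarrow(replayNetworkId));
static_assert(bypassesSigCheckBroad(wildcardNetworkId) && bypassesSigCheckNarrow(wildcardNetworkId));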
@@ -669,19 +669,18 @@ JSS(strict); // in: AccountCurrencies, AccountInfo
JSS(sub_index);   // in: LedgerEntry
JSS(subcommand);  // in: PathFind
JSS(success);     // rpc
JSS(success_count);
JSS(supported);           // out: AmendmentTableImpl
JSS(system_time_offset);  // out: NetworkOPs
JSS(tag);                 // out: Peers
JSS(taker);               // in: Subscribe, BookOffers
JSS(taker_gets);          // in: Subscribe, Unsubscribe, BookOffers
JSS(taker_gets_funded);   // out: NetworkOPs
JSS(taker_pays);          // in: Subscribe, Unsubscribe, BookOffers
JSS(taker_pays_funded);   // out: NetworkOPs
JSS(threshold);           // in: Blacklist
JSS(ticket);              // in: AccountObjects
JSS(ticket_count);        // out: AccountInfo
JSS(ticket_seq);          // in: LedgerEntry
JSS(supported);           // out: AmendmentTableImpl
JSS(system_time_offset);  // out: NetworkOPs
JSS(tag);                 // out: Peers
JSS(taker);               // in: Subscribe, BookOffers
JSS(taker_gets);          // in: Subscribe, Unsubscribe, BookOffers
JSS(taker_gets_funded);   // out: NetworkOPs
JSS(taker_pays);          // in: Subscribe, Unsubscribe, BookOffers
JSS(taker_pays_funded);   // out: NetworkOPs
JSS(threshold);           // in: Blacklist
JSS(ticket);              // in: AccountObjects
JSS(ticket_count);        // out: AccountInfo
JSS(ticket_seq);          // in: LedgerEntry
JSS(time);
JSS(timeouts);  // out: InboundLedger
JSS(track);     // out: PeerImp
@@ -705,13 +704,11 @@ JSS(trusted); // out: UnlList
JSS(trusted_validator_keys);  // out: ValidatorList
JSS(tx);                      // out: STTx, AccountTx*
JSS(txroot);
JSS(tx_blob);  // in/out: Submit,
JSS(tx_blobs);
// in: TransactionSign, AccountTx*
JSS(tx_hash);  // in: TransactionEntry
JSS(tx_json);  // in/out: TransactionSign
// out: TransactionEntry
JSS(tx_results);
JSS(tx_blob);  // in/out: Submit,
// in: TransactionSign, AccountTx*
JSS(tx_hash);          // in: TransactionEntry
JSS(tx_json);          // in/out: TransactionSign
// out: TransactionEntry
JSS(tx_signing_hash);  // out: TransactionSign
JSS(tx_unsigned);      // out: TransactionSign
JSS(txn_count);        // out: NetworkOPs

@@ -29,9 +29,6 @@
#include <ripple/rpc/GRPCHandlers.h>
#include <ripple/rpc/impl/RPCHelpers.h>
#include <ripple/rpc/impl/TransactionSign.h>
#include <future>
#include <thread>
#include <vector>

namespace ripple {
@@ -85,220 +82,15 @@ doInject(RPC::JsonContext& context)
return jvResult;
}

// Helper function to process a single transaction blob
static Json::Value
processSingleTransaction(
RPC::JsonContext& context,
const std::string& txBlob,
const NetworkOPs::FailHard& failType)
{
Json::Value result;

auto ret = strUnHex(txBlob);
if (!ret || !ret->size())
{
result[jss::error] = "invalidTransaction";
result[jss::error_exception] = "Invalid hex encoding";
return result;
}

SerialIter sitTrans(makeSlice(*ret));
std::shared_ptr<STTx const> stpTrans;

try
{
stpTrans = std::make_shared<STTx const>(std::ref(sitTrans));
}
catch (std::exception& e)
{
result[jss::error] = "invalidTransaction";
result[jss::error_exception] = e.what();
return result;
}

// Validity check
{
if (!context.app.checkSigs())
forceValidity(
context.app.getHashRouter(),
stpTrans->getTransactionID(),
Validity::SigGoodOnly);
auto [validity, reason] = checkValidity(
context.app.getHashRouter(),
*stpTrans,
context.ledgerMaster.getCurrentLedger()->rules(),
context.app.config());
if (validity != Validity::Valid)
{
result[jss::error] = "invalidTransaction";
result[jss::error_exception] = "fails local checks: " + reason;
return result;
}
}

std::string reason;
auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, context.app);
if (tpTrans->getStatus() != NEW)
{
result[jss::error] = "invalidTransaction";
result[jss::error_exception] = "fails local checks: " + reason;
return result;
}

try
{
context.netOps.processTransaction(
tpTrans, isUnlimited(context.role), true, failType);
}
catch (std::exception& e)
{
result[jss::error] = "internalSubmit";
result[jss::error_exception] = e.what();
return result;
}

try
{
result[jss::tx_json] = tpTrans->getJson(JsonOptions::none);
result[jss::tx_blob] =
strHex(tpTrans->getSTransaction()->getSerializer().peekData());

if (temUNCERTAIN != tpTrans->getResult())
{
std::string sToken;
std::string sHuman;

transResultInfo(tpTrans->getResult(), sToken, sHuman);

result[jss::engine_result] = sToken;
result[jss::engine_result_code] = tpTrans->getResult();
result[jss::engine_result_message] = sHuman;

auto const submitResult = tpTrans->getSubmitResult();

result[jss::accepted] = submitResult.any();
result[jss::applied] = submitResult.applied;
result[jss::broadcast] = submitResult.broadcast;
result[jss::queued] = submitResult.queued;
result[jss::kept] = submitResult.kept;

if (auto currentLedgerState = tpTrans->getCurrentLedgerState())
{
result[jss::account_sequence_next] =
safe_cast<Json::Value::UInt>(
currentLedgerState->accountSeqNext);
result[jss::account_sequence_available] =
safe_cast<Json::Value::UInt>(
currentLedgerState->accountSeqAvail);
result[jss::open_ledger_cost] =
to_string(currentLedgerState->minFeeRequired);
result[jss::validated_ledger_index] =
safe_cast<Json::Value::UInt>(
currentLedgerState->validatedLedger);
}
}

return result;
}
catch (std::exception& e)
{
result[jss::error] = "internalJson";
result[jss::error_exception] = e.what();
return result;
}
}
// {
//   tx_json: <object>,
//   secret: <secret>
// }
// OR for batch submission:
// {
//   "tx_blobs": [<blob1>, <blob2>, ...],
// }
Json::Value
doSubmit(RPC::JsonContext& context)
{
context.loadType = Resource::feeMediumBurdenRPC;

// Check for batch submission
if (context.params.isMember("tx_blobs"))
{
if (!context.params["tx_blobs"].isArray())
return rpcError(rpcINVALID_PARAMS);

const auto& txBlobs = context.params["tx_blobs"];
const auto blobCount = txBlobs.size();

if (blobCount == 0)
return rpcError(rpcINVALID_PARAMS);

// Limit batch size to prevent resource exhaustion
constexpr size_t maxBatchSize = 100;
if (blobCount > maxBatchSize)
{
Json::Value error;
error[jss::error] = "batchSizeExceeded";
error["error_message"] =
"Batch size exceeds maximum of " + std::to_string(maxBatchSize);
return error;
}

auto const failType = getFailHard(context);

// Process transactions in parallel
std::vector<std::future<Json::Value>> futures;
futures.reserve(blobCount);

// Launch async tasks for each transaction
for (size_t i = 0; i < blobCount; ++i)
{
if (!txBlobs[i].isString())
{
// Create error result for invalid blob
std::promise<Json::Value> errorPromise;
Json::Value errorResult;
errorResult[jss::error] = "invalidTransaction";
errorResult[jss::error_exception] =
"tx_blobs element must be string";
errorPromise.set_value(std::move(errorResult));
futures.push_back(errorPromise.get_future());
continue;
}

const std::string txBlobStr = txBlobs[i].asString();
futures.push_back(std::async(
std::launch::async, [&context, txBlobStr, failType]() {
return processSingleTransaction(
context, txBlobStr, failType);
}));
}

// Collect results
Json::Value jvResult;
Json::Value& results = jvResult["tx_results"] = Json::arrayValue;

for (auto& future : futures)
{
results.append(future.get());
}

jvResult["batch_count"] = static_cast<Json::UInt>(blobCount);

// Count successful submissions
Json::UInt successCount = 0;
for (const auto& result : results)
{
std::cout << result << "\n";
if (!result.isMember(jss::error))
++successCount;
}
jvResult["success_count"] = successCount;

return jvResult;
}
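For reference, a hedged example of how a client-side request for the batch path above might be assembled with jsoncpp; the "tx_blobs" field and the 100-blob cap come from the handler above, while everything else (the placeholder hex strings, the standalone main) is illustrative only:

// Illustrative only: building a batch submit parameter object that the
// tx_blobs branch of doSubmit above would accept.
#include <json/json.h>
#include <iostream>

int main()
{
    Json::Value params(Json::objectValue);
    params["tx_blobs"] = Json::Value(Json::arrayValue);
    params["tx_blobs"].append("DEADBEEF...");  // hex-encoded signed tx blob (placeholder)
    params["tx_blobs"].append("CAFEBABE...");  // up to 100 entries per the handler's limit

    std::cout << params.toStyledString() << "\n";
    return 0;
}

The handler replies with a "tx_results" array (one entry per blob, collected in submission order), plus "batch_count" and "success_count".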
// Single transaction submission (original code path)
if (!context.params.isMember(jss::tx_blob))
{
auto const failType = getFailHard(context);
@@ -324,10 +116,124 @@ doSubmit(RPC::JsonContext& context)
return ret;
}

// Process single tx_blob
auto const failType = getFailHard(context);
return processSingleTransaction(
context, context.params[jss::tx_blob].asString(), failType);
Json::Value jvResult;

auto ret = strUnHex(context.params[jss::tx_blob].asString());

if (!ret || !ret->size())
return rpcError(rpcINVALID_PARAMS);

SerialIter sitTrans(makeSlice(*ret));

std::shared_ptr<STTx const> stpTrans;

try
{
stpTrans = std::make_shared<STTx const>(std::ref(sitTrans));
}
catch (std::exception& e)
{
jvResult[jss::error] = "invalidTransaction";
jvResult[jss::error_exception] = e.what();

return jvResult;
}

{
if (!context.app.checkSigs())
forceValidity(
context.app.getHashRouter(),
stpTrans->getTransactionID(),
Validity::SigGoodOnly);
auto [validity, reason] = checkValidity(
context.app.getHashRouter(),
*stpTrans,
context.ledgerMaster.getCurrentLedger()->rules(),
context.app.config());
if (validity != Validity::Valid)
{
jvResult[jss::error] = "invalidTransaction";
jvResult[jss::error_exception] = "fails local checks: " + reason;

return jvResult;
}
}
std::string reason;
auto tpTrans = std::make_shared<Transaction>(stpTrans, reason, context.app);
if (tpTrans->getStatus() != NEW)
{
jvResult[jss::error] = "invalidTransaction";
jvResult[jss::error_exception] = "fails local checks: " + reason;

return jvResult;
}

try
{
auto const failType = getFailHard(context);

context.netOps.processTransaction(
tpTrans, isUnlimited(context.role), true, failType);
}
catch (std::exception& e)
{
jvResult[jss::error] = "internalSubmit";
jvResult[jss::error_exception] = e.what();

return jvResult;
}

try
{
jvResult[jss::tx_json] = tpTrans->getJson(JsonOptions::none);
jvResult[jss::tx_blob] =
strHex(tpTrans->getSTransaction()->getSerializer().peekData());

if (temUNCERTAIN != tpTrans->getResult())
{
std::string sToken;
std::string sHuman;

transResultInfo(tpTrans->getResult(), sToken, sHuman);

jvResult[jss::engine_result] = sToken;
jvResult[jss::engine_result_code] = tpTrans->getResult();
jvResult[jss::engine_result_message] = sHuman;

auto const submitResult = tpTrans->getSubmitResult();

jvResult[jss::accepted] = submitResult.any();
jvResult[jss::applied] = submitResult.applied;
jvResult[jss::broadcast] = submitResult.broadcast;
jvResult[jss::queued] = submitResult.queued;
jvResult[jss::kept] = submitResult.kept;

if (auto currentLedgerState = tpTrans->getCurrentLedgerState())
{
jvResult[jss::account_sequence_next] =
safe_cast<Json::Value::UInt>(
currentLedgerState->accountSeqNext);
jvResult[jss::account_sequence_available] =
safe_cast<Json::Value::UInt>(
currentLedgerState->accountSeqAvail);
jvResult[jss::open_ledger_cost] =
to_string(currentLedgerState->minFeeRequired);
jvResult[jss::validated_ledger_index] =
safe_cast<Json::Value::UInt>(
currentLedgerState->validatedLedger);
}
}

return jvResult;
}
catch (std::exception& e)
{
jvResult[jss::error] = "internalJson";
jvResult[jss::error_exception] = e.what();

return jvResult;
}
}

} // namespace ripple