CI for memory safety - updates
This commit is contained in:
parent
4df3687045
commit
08407e48e1
3 changed files with 220 additions and 21 deletions
|
|
@ -9,21 +9,37 @@ Usage:
|
|||
python3 tests/run_tests.py -v # Verbose output (show failure details)
|
||||
python3 tests/run_tests.py --checksums # Show screenshot checksums
|
||||
python3 tests/run_tests.py --timeout=30 # Custom timeout
|
||||
python3 tests/run_tests.py --sanitizer # Detect ASan/UBSan errors in output
|
||||
python3 tests/run_tests.py --valgrind # Run tests under Valgrind memcheck
|
||||
|
||||
Environment variables:
|
||||
MCRF_BUILD_DIR Build directory (default: ../build)
|
||||
MCRF_LIB_DIR Library directory for LD_LIBRARY_PATH (default: ../__lib)
|
||||
MCRF_TIMEOUT_MULTIPLIER Multiply per-test timeout (default: 1, use 50 for valgrind)
|
||||
"""
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import hashlib
|
||||
import re
|
||||
from pathlib import Path
|
||||
|
||||
# Configuration
|
||||
# Configuration — respect environment overrides for debug/sanitizer builds
|
||||
TESTS_DIR = Path(__file__).parent
|
||||
BUILD_DIR = TESTS_DIR.parent / "build"
|
||||
LIB_DIR = TESTS_DIR.parent / "__lib"
|
||||
BUILD_DIR = Path(os.environ.get('MCRF_BUILD_DIR', str(TESTS_DIR.parent / "build")))
|
||||
LIB_DIR = Path(os.environ.get('MCRF_LIB_DIR', str(TESTS_DIR.parent / "__lib")))
|
||||
MCROGUEFACE = BUILD_DIR / "mcrogueface"
|
||||
DEFAULT_TIMEOUT = 10 # seconds per test
|
||||
|
||||
# Sanitizer error patterns to scan for in stderr
|
||||
SANITIZER_PATTERNS = [
|
||||
re.compile(r'ERROR: AddressSanitizer'),
|
||||
re.compile(r'ERROR: LeakSanitizer'),
|
||||
re.compile(r'runtime error:'), # UBSan
|
||||
re.compile(r'ERROR: ThreadSanitizer'),
|
||||
]
|
||||
|
||||
# Test directories to run (in order)
|
||||
TEST_DIRS = ['unit', 'integration', 'regression']
|
||||
|
||||
|
|
@ -42,8 +58,18 @@ def get_screenshot_checksum(test_dir):
|
|||
checksums[png.name] = hashlib.md5(f.read()).hexdigest()[:8]
|
||||
return checksums
|
||||
|
||||
def run_test(test_path, verbose=False, timeout=DEFAULT_TIMEOUT):
|
||||
"""Run a single test and return (passed, duration, output)."""
|
||||
def check_sanitizer_output(output):
    """Scan *output* for known sanitizer error signatures.

    Every regex in SANITIZER_PATTERNS is applied to the full text;
    all matches are collected in pattern order. Returns the (possibly
    empty) list of matched strings.
    """
    return [hit
            for pattern in SANITIZER_PATTERNS
            for hit in pattern.findall(output)]
|
||||
|
||||
def run_test(test_path, verbose=False, timeout=DEFAULT_TIMEOUT,
|
||||
sanitizer_mode=False, valgrind_mode=False):
|
||||
"""Run a single test and return (passed, duration, output, sanitizer_errors)."""
|
||||
start = time.time()
|
||||
|
||||
# Clean any existing screenshots
|
||||
|
|
@ -55,9 +81,28 @@ def run_test(test_path, verbose=False, timeout=DEFAULT_TIMEOUT):
|
|||
existing_ld = env.get('LD_LIBRARY_PATH', '')
|
||||
env['LD_LIBRARY_PATH'] = f"{LIB_DIR}:{existing_ld}" if existing_ld else str(LIB_DIR)
|
||||
|
||||
# Build the command
|
||||
cmd = []
|
||||
valgrind_log = None
|
||||
|
||||
if valgrind_mode:
|
||||
valgrind_log = BUILD_DIR / f"valgrind-{test_path.stem}.log"
|
||||
supp_file = TESTS_DIR.parent / "sanitizers" / "valgrind-mcrf.supp"
|
||||
cmd.extend([
|
||||
'valgrind',
|
||||
'--tool=memcheck',
|
||||
'--leak-check=full',
|
||||
'--error-exitcode=42',
|
||||
f'--log-file={valgrind_log}',
|
||||
])
|
||||
if supp_file.exists():
|
||||
cmd.append(f'--suppressions={supp_file}')
|
||||
|
||||
cmd.extend([str(MCROGUEFACE), '--headless', '--exec', str(test_path)])
|
||||
|
||||
try:
|
||||
result = subprocess.run(
|
||||
[str(MCROGUEFACE), '--headless', '--exec', str(test_path)],
|
||||
cmd,
|
||||
capture_output=True,
|
||||
text=True,
|
||||
timeout=timeout,
|
||||
|
|
@ -72,12 +117,33 @@ def run_test(test_path, verbose=False, timeout=DEFAULT_TIMEOUT):
|
|||
if 'FAIL' in output and 'PASS' not in output.split('FAIL')[-1]:
|
||||
passed = False
|
||||
|
||||
return passed, duration, output
|
||||
# Check for sanitizer errors in output
|
||||
sanitizer_errors = []
|
||||
if sanitizer_mode:
|
||||
sanitizer_errors = check_sanitizer_output(output)
|
||||
if sanitizer_errors:
|
||||
passed = False
|
||||
|
||||
# Check valgrind results
|
||||
if valgrind_mode:
|
||||
if result.returncode == 42:
|
||||
passed = False
|
||||
if valgrind_log and valgrind_log.exists():
|
||||
vg_output = valgrind_log.read_text()
|
||||
# Extract error summary
|
||||
error_lines = [l for l in vg_output.split('\n')
|
||||
if 'ERROR SUMMARY' in l or 'definitely lost' in l
|
||||
or 'Invalid' in l]
|
||||
sanitizer_errors.extend(error_lines[:5])
|
||||
output += f"\n--- Valgrind log: {valgrind_log} ---\n"
|
||||
output += '\n'.join(error_lines)
|
||||
|
||||
return passed, duration, output, sanitizer_errors
|
||||
|
||||
except subprocess.TimeoutExpired:
|
||||
return False, timeout, "TIMEOUT"
|
||||
return False, timeout, "TIMEOUT", []
|
||||
except Exception as e:
|
||||
return False, 0, str(e)
|
||||
return False, 0, str(e), []
|
||||
|
||||
def find_tests(directory):
|
||||
"""Find all test files in a directory."""
|
||||
|
|
@ -88,7 +154,9 @@ def find_tests(directory):
|
|||
|
||||
def main():
|
||||
verbose = '-v' in sys.argv or '--verbose' in sys.argv
|
||||
show_checksums = '--checksums' in sys.argv # off by default; use --checksums to show
|
||||
show_checksums = '--checksums' in sys.argv
|
||||
sanitizer_mode = '--sanitizer' in sys.argv
|
||||
valgrind_mode = '--valgrind' in sys.argv
|
||||
|
||||
# Parse --timeout=N
|
||||
timeout = DEFAULT_TIMEOUT
|
||||
|
|
@ -99,6 +167,10 @@ def main():
|
|||
except ValueError:
|
||||
pass
|
||||
|
||||
# Apply timeout multiplier from environment
|
||||
timeout_multiplier = float(os.environ.get('MCRF_TIMEOUT_MULTIPLIER', '1'))
|
||||
effective_timeout = timeout * timeout_multiplier
|
||||
|
||||
# Determine which directories to test
|
||||
dirs_to_test = []
|
||||
for arg in sys.argv[1:]:
|
||||
|
|
@ -107,8 +179,16 @@ def main():
|
|||
if not dirs_to_test:
|
||||
dirs_to_test = TEST_DIRS
|
||||
|
||||
print(f"{BOLD}McRogueFace Test Runner{RESET}")
|
||||
print(f"Testing: {', '.join(dirs_to_test)} (timeout: {timeout}s)")
|
||||
# Header
|
||||
mode_str = ""
|
||||
if sanitizer_mode:
|
||||
mode_str = f" {YELLOW}[ASan/UBSan]{RESET}"
|
||||
elif valgrind_mode:
|
||||
mode_str = f" {YELLOW}[Valgrind]{RESET}"
|
||||
|
||||
print(f"{BOLD}McRogueFace Test Runner{RESET}{mode_str}")
|
||||
print(f"Build: {BUILD_DIR}")
|
||||
print(f"Testing: {', '.join(dirs_to_test)} (timeout: {effective_timeout:.0f}s)")
|
||||
print("=" * 60)
|
||||
|
||||
results = {'pass': 0, 'fail': 0, 'total_time': 0}
|
||||
|
|
@ -123,7 +203,11 @@ def main():
|
|||
|
||||
for test_path in tests:
|
||||
test_name = test_path.name
|
||||
passed, duration, output = run_test(test_path, verbose, timeout)
|
||||
passed, duration, output, san_errors = run_test(
|
||||
test_path, verbose, effective_timeout,
|
||||
sanitizer_mode=sanitizer_mode,
|
||||
valgrind_mode=valgrind_mode
|
||||
)
|
||||
results['total_time'] += duration
|
||||
|
||||
if passed:
|
||||
|
|
@ -132,7 +216,7 @@ def main():
|
|||
else:
|
||||
results['fail'] += 1
|
||||
status = f"{RED}FAIL{RESET}"
|
||||
failures.append((test_dir, test_name, output))
|
||||
failures.append((test_dir, test_name, output, san_errors))
|
||||
|
||||
# Get screenshot checksums if any were generated
|
||||
checksum_str = ""
|
||||
|
|
@ -141,10 +225,18 @@ def main():
|
|||
if checksums:
|
||||
checksum_str = f" [{', '.join(f'{k}:{v}' for k,v in checksums.items())}]"
|
||||
|
||||
print(f" {status} {test_name} ({duration:.2f}s){checksum_str}")
|
||||
# Show sanitizer error indicator
|
||||
san_str = ""
|
||||
if san_errors:
|
||||
san_str = f" {RED}[SANITIZER]{RESET}"
|
||||
|
||||
print(f" {status} {test_name} ({duration:.2f}s){checksum_str}{san_str}")
|
||||
|
||||
if verbose and not passed:
|
||||
print(f" Output: {output[:200]}...")
|
||||
if san_errors:
|
||||
for err in san_errors[:3]:
|
||||
print(f" {RED}>> {err}{RESET}")
|
||||
|
||||
# Summary
|
||||
print("\n" + "=" * 60)
|
||||
|
|
@ -154,10 +246,16 @@ def main():
|
|||
print(f"{BOLD}Results:{RESET} {results['pass']}/{total} passed ({pass_rate:.1f}%)")
|
||||
print(f"{BOLD}Time:{RESET} {results['total_time']:.2f}s")
|
||||
|
||||
if valgrind_mode:
|
||||
print(f"{BOLD}Valgrind logs:{RESET} {BUILD_DIR}/valgrind-*.log")
|
||||
|
||||
if failures:
|
||||
print(f"\n{RED}{BOLD}Failures:{RESET}")
|
||||
for test_dir, test_name, output in failures:
|
||||
for test_dir, test_name, output, san_errors in failures:
|
||||
print(f" {test_dir}/{test_name}")
|
||||
if san_errors:
|
||||
for err in san_errors[:3]:
|
||||
print(f" {RED}>> {err}{RESET}")
|
||||
if verbose:
|
||||
# Show last few lines of output
|
||||
lines = output.strip().split('\n')[-5:]
|
||||
|
|
|
|||
78
tests/test_gridstate_resize.py
Normal file
78
tests/test_gridstate_resize.py
Normal file
|
|
@ -0,0 +1,78 @@
|
|||
"""Regression test: entity gridstate must resize when moving between grids.
|
||||
|
||||
Bug: UIEntity::set_grid() only initialized gridstate when it was empty
|
||||
(size == 0). When an entity moved from a small grid to a larger grid,
|
||||
gridstate kept the old size. UIEntity::updateVisibility() then wrote
|
||||
past the end of the vector using the new grid's dimensions, corrupting
|
||||
adjacent heap memory.
|
||||
|
||||
Trigger: any entity that calls update_visibility() after moving to a
|
||||
larger grid. In Liber Noster this was the player entity using the
|
||||
engine's perspective FOV system across zone transitions.
|
||||
|
||||
This script should exit cleanly. Before the fix, it segfaulted or
|
||||
produced incorrect gridstate lengths.
|
||||
"""
|
||||
import mcrfpy
|
||||
|
||||
# Create a small grid and a large grid
|
||||
small = mcrfpy.Grid(grid_size=(10, 10))
|
||||
large = mcrfpy.Grid(grid_size=(50, 50))
|
||||
|
||||
# Create an entity on the small grid
|
||||
entity = mcrfpy.Entity(grid_pos=(5, 5), grid=small)
|
||||
|
||||
# Force gridstate initialization by calling update_visibility
|
||||
small.perspective = entity
|
||||
small.fov_radius = 4
|
||||
entity.update_visibility() # gridstate sized to 10*10 = 100
|
||||
|
||||
# Verify gridstate matches small grid
|
||||
gs = entity.gridstate
|
||||
assert len(gs) == 100, f"Expected gridstate size 100 for 10x10 grid, got {len(gs)}"
|
||||
|
||||
# Move entity to the larger grid
|
||||
entity.grid = large
|
||||
|
||||
# Gridstate must now match the large grid's dimensions
|
||||
gs = entity.gridstate
|
||||
assert len(gs) == 2500, f"Expected gridstate size 2500 for 50x50 grid, got {len(gs)}"
|
||||
|
||||
# Set up perspective on the large grid
|
||||
large.perspective = entity
|
||||
large.fov_radius = 8
|
||||
|
||||
# This triggers updateVisibility() which iterates 50*50 = 2500 cells.
|
||||
# Before the fix, gridstate was only 100 entries — heap buffer overflow.
|
||||
entity.update_visibility()
|
||||
|
||||
# Stress test: repeatedly move between grids of different sizes to
|
||||
# exercise the resize path and pressure the heap allocator.
|
||||
grids = [mcrfpy.Grid(grid_size=(s, s)) for s in (5, 80, 3, 60, 10, 100)]
|
||||
for g in grids:
|
||||
entity.grid = g
|
||||
g.perspective = entity
|
||||
g.fov_radius = 4
|
||||
entity.update_visibility()
|
||||
gs = entity.gridstate
|
||||
expected = g.grid_w * g.grid_h
|
||||
assert len(gs) == expected, f"Expected {expected}, got {len(gs)} for {g.grid_w}x{g.grid_h}"
|
||||
|
||||
# Also allocate other objects between transitions to fill freed heap
|
||||
# regions — makes corruption more likely to manifest as a crash.
|
||||
for i in range(20):
|
||||
small_g = mcrfpy.Grid(grid_size=(5, 5))
|
||||
entity.grid = small_g
|
||||
small_g.perspective = entity
|
||||
entity.update_visibility()
|
||||
|
||||
big_g = mcrfpy.Grid(grid_size=(80, 80))
|
||||
entity.grid = big_g
|
||||
big_g.perspective = entity
|
||||
entity.update_visibility()
|
||||
|
||||
# Create and destroy interim objects to churn the heap
|
||||
frames = [mcrfpy.Frame() for _ in range(10)]
|
||||
del frames
|
||||
|
||||
print("PASS: gridstate resized correctly across all transitions")
|
||||
Loading…
Add table
Add a link
Reference in a new issue