major refactor (untested)
This commit is contained in:
parent
a29412b4da
commit
a77a0c0393
22 changed files with 1037 additions and 24 deletions
38
tests/test_error_rate_plot.py
Normal file
38
tests/test_error_rate_plot.py
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
import os
|
||||
import pytest
|
||||
from unittest.mock import patch, mock_open
|
||||
from archive.test_errorrate_runner import generate_error_rate_plot
|
||||
|
||||
@pytest.fixture
def mock_data_dir(tmp_path):
    """Provide a fresh, empty ``data`` directory under pytest's tmp path."""
    directory = tmp_path / "data"
    directory.mkdir()
    return directory
|
||||
|
||||
@patch("builtins.open", new_callable=mock_open, read_data="{}")
|
||||
@patch("os.path.exists", return_value=True)
|
||||
def test_generate_error_rate_plot_no_data(mock_exists, mock_open, mock_data_dir):
|
||||
"""Test generate_error_rate_plot with no data."""
|
||||
plot_path, summary = generate_error_rate_plot(str(mock_data_dir / "applications.json"))
|
||||
assert plot_path is None
|
||||
assert summary == ""
|
||||
|
||||
@patch("builtins.open", new_callable=mock_open)
|
||||
@patch("os.path.exists", return_value=True)
|
||||
@patch("matplotlib.pyplot.savefig")
|
||||
def test_generate_error_rate_plot_with_data(mock_savefig, mock_exists, mock_open, mock_data_dir):
|
||||
"""Test generate_error_rate_plot with valid data."""
|
||||
mock_open.return_value.read.return_value = """
|
||||
{
|
||||
"1": {"timestamp": "2025-12-25T12:00:00", "company": "CompanyA", "success": true},
|
||||
"2": {"timestamp": "2025-12-26T12:00:00", "company": "CompanyB", "success": false}
|
||||
}
|
||||
"""
|
||||
plot_path, summary = generate_error_rate_plot(str(mock_data_dir / "applications.json"))
|
||||
assert plot_path is not None
|
||||
assert "Total attempts" in summary
|
||||
assert "Successes" in summary
|
||||
assert "Failures" in summary
|
||||
assert "Overall error rate" in summary
|
||||
mock_savefig.assert_called_once()
|
||||
|
|
@ -1,151 +0,0 @@
|
|||
#!/usr/bin/env python3
"""Simple test runner for the monitor's error-rate plot generator.

Run from the repository root (where `monitor.py` and `data/` live):

    python3 tests/test_errorrate_runner.py

This will call `TelegramBot._generate_error_rate_plot()` and print the result.
"""
import os
import sys
import logging

logging.basicConfig(level=logging.INFO)

import pandas as pd
import matplotlib
# Select the non-interactive Agg backend before pyplot is imported so the
# script can run headless (no display server required).
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import json

# Repository-level data directory: ../data relative to this file.
DATA_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'data'))
# JSON file holding one record per application attempt.
APPLICATIONS_FILE = os.path.join(DATA_DIR, 'applications.json')
|
||||
|
||||
def generate_error_rate_plot(applications_file: str):
    """Build a three-panel error-rate plot from an applications JSON file.

    Panels: daily successes/failures (stacked bars), overall daily error
    rate, and per-company daily error rate.

    Args:
        applications_file: Path to a JSON file mapping application ids to
            records with ``timestamp``, ``company`` and ``success`` keys.

    Returns:
        ``(plot_path, summary)`` — path of the written PNG and an HTML
        summary string, or ``(None, '')`` when there is no usable data or
        any error occurs (errors are printed, never raised).
    """
    if not os.path.exists(applications_file):
        print('No applications.json found at', applications_file)
        return None, ''
    try:
        with open(applications_file, 'r', encoding='utf-8') as f:
            apps = json.load(f)
        if not apps:
            return None, ''

        # Flatten records; unparsable timestamps become NaT and are dropped.
        rows = []
        for _id, rec in apps.items():
            ts = rec.get('timestamp')
            try:
                dt = pd.to_datetime(ts)
            except Exception:
                dt = pd.NaT
            rows.append({'id': _id, 'company': rec.get('company'), 'success': bool(rec.get('success')), 'ts': dt})
        df = pd.DataFrame(rows)
        df = df.dropna(subset=['ts'])
        if df.empty:
            return None, ''

        # Daily aggregates across all companies.
        df['date'] = df['ts'].dt.floor('D')
        grouped = df.groupby('date').agg(total=('id', 'count'), successes=('success', 'sum'))
        grouped['failures'] = grouped['total'] - grouped['successes']
        grouped['error_rate'] = grouped['failures'] / grouped['total']
        grouped = grouped.sort_index()

        import matplotlib.dates as mdates
        fig, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(12, 12), sharex=True)

        # Shared x positions in matplotlib date coordinates.
        dates = pd.to_datetime(grouped.index).to_pydatetime()
        x = mdates.date2num(dates)
        width = 0.6

        # Stacked bars: successes vs failures. Draw directly at date
        # coordinates — the previous DataFrame.plot(kind='bar') call used
        # categorical positions 0..n-1 and double-plotted the same data.
        ax1.bar(x, grouped['successes'].values, width=width, color='#2E8B57', align='center', label='successes')
        ax1.bar(x, grouped['failures'].values, bottom=grouped['successes'].values, width=width, color='#C44A4A', align='center', label='failures')
        ax1.set_ylabel('Count')
        ax1.set_title('Autopilot: Successes vs Failures (by day)')
        ax1.legend(loc='upper right', fontsize='small')
        ax1.set_xticks(x)
        ax1.set_xlim(min(x) - 1, max(x) + 1)
        ax1.xaxis.set_major_locator(mdates.AutoDateLocator())
        ax1.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))

        # Overall daily error rate (failures / total).
        ax2.plot(x, grouped['error_rate'].values, marker='o', color='#3333AA', linewidth=2)
        ax2.set_ylim(-0.02, 1.02)
        ax2.set_ylabel('Error rate')
        ax2.set_xlabel('Date')
        ax2.set_title('Daily Error Rate (failures / total)')
        ax2.grid(True, alpha=0.3)
        ax2.set_xticks(x)
        ax2.set_xlim(min(x) - 1, max(x) + 1)
        ax2.xaxis.set_major_locator(mdates.AutoDateLocator())
        ax2.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))

        # Per-company daily error rate.
        company_grouped = df.groupby(['date', 'company']).agg(total=('id', 'count'), successes=('success', 'sum'))
        company_grouped['failures'] = company_grouped['total'] - company_grouped['successes']
        company_grouped['error_rate'] = company_grouped['failures'] / company_grouped['total']
        company_grouped = company_grouped.reset_index()
        # Pivot to index=date, columns=company; reindex onto grouped.index so
        # rows line up with the x positions even when a company has gaps
        # (NaN simply draws no marker for that day).
        error_rate_pivot = company_grouped.pivot(index='date', columns='company', values='error_rate')
        error_rate_pivot = error_rate_pivot.reindex(grouped.index)
        for company in error_rate_pivot.columns:
            ax3.plot(x, error_rate_pivot[company].values, marker='o', label=str(company))
        ax3.set_ylim(-0.02, 1.02)
        ax3.set_ylabel('Error rate')
        ax3.set_xlabel('Date')
        ax3.set_title('Daily Error Rate by Company')
        ax3.grid(True, alpha=0.3)
        ax3.set_xticks(x)
        ax3.set_xlim(min(x) - 1, max(x) + 1)
        ax3.xaxis.set_major_locator(mdates.AutoDateLocator())
        ax3.xaxis.set_major_formatter(mdates.DateFormatter('%Y-%m-%d'))
        ax3.legend(title='Company', loc='upper right', fontsize='small')

        fig.autofmt_xdate()

        # Write next to the input file (atomically where possible) so the
        # output follows whatever data directory the caller pointed at,
        # instead of the hard-coded module-level DATA_DIR.
        out_dir = os.path.dirname(os.path.abspath(applications_file))
        plot_path = os.path.join(out_dir, 'error_rate.png')
        tmp_path = os.path.join(out_dir, 'error_rate.tmp.png')
        fig.savefig(tmp_path, format='png')
        plt.close(fig)
        try:
            # Atomic replace where possible
            os.replace(tmp_path, plot_path)
        except Exception:
            try:
                if os.path.exists(plot_path):
                    os.remove(plot_path)
                os.rename(tmp_path, plot_path)
            except Exception as e:
                print('Failed to write plot file:', e)
                return None, ''

        total_attempts = int(grouped['total'].sum())
        total_success = int(grouped['successes'].sum())
        total_fail = int(grouped['failures'].sum())
        overall_error = (total_fail / total_attempts) if total_attempts > 0 else 0.0
        summary = f"<b>Total attempts:</b> {total_attempts}\n<b>Successes:</b> {total_success}\n<b>Failures:</b> {total_fail}\n<b>Overall error rate:</b> {overall_error:.1%}"
        return plot_path, summary
    except Exception as e:
        print('Error generating plot:', e)
        return None, ''
|
||||
|
||||
def main():
    """Run the plot generator against the repo's data file and report results."""
    # Use the local implementation to avoid importing the full monitor (Playwright heavy)
    plot_path, summary = generate_error_rate_plot(APPLICATIONS_FILE)
    if not plot_path:
        print("No plot generated (insufficient data or error)")
        sys.exit(3)
    print("PLOT_PATH:", plot_path)
    print("EXISTS:", os.path.exists(plot_path))
    print("SUMMARY:\n", summary)
    sys.exit(0)


if __name__ == '__main__':
    main()
|
||||
62
tests/test_handlers.py
Normal file
62
tests/test_handlers.py
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
import pytest
|
||||
from handlers.howoge_handler import HowogeHandler
|
||||
from handlers.gewobag_handler import GewobagHandler
|
||||
from handlers.degewo_handler import DegewoHandler
|
||||
from handlers.gesobau_handler import GesobauHandler
|
||||
from handlers.stadtundland_handler import StadtUndLandHandler
|
||||
from handlers.wbm_handler import WBMHandler
|
||||
from unittest.mock import AsyncMock
|
||||
|
||||
# One (handler class, listing link) pair per company portal. The six original
# tests were byte-for-byte copies of each other apart from these two values,
# so they are collapsed into a single parametrized test with identical coverage.
HANDLER_CASES = [
    (HowogeHandler, "https://www.howoge.de/example"),
    (GewobagHandler, "https://www.gewobag.de/example"),
    (DegewoHandler, "https://www.degewo.de/example"),
    (GesobauHandler, "https://www.gesobau.de/example"),
    (StadtUndLandHandler, "https://www.stadtundland.de/example"),
    (WBMHandler, "https://www.wbm.de/example"),
]


@pytest.mark.asyncio
@pytest.mark.parametrize("handler_cls,link", HANDLER_CASES)
async def test_handler_apply_sets_success(handler_cls, link):
    """Every handler's apply() must leave a 'success' key in the result dict."""
    context = AsyncMock()
    handler = handler_cls(context)
    listing = {"link": link}
    result = {"success": False}
    await handler.apply(listing, result)
    assert "success" in result
|
||||
62
tests/test_telegram_bot.py
Normal file
62
tests/test_telegram_bot.py
Normal file
|
|
@ -0,0 +1,62 @@
|
|||
import os
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
from telegram_bot import TelegramBot
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables from .env file
|
||||
load_dotenv()
|
||||
|
||||
# Explicitly pass token and chat ID to ensure they are set
|
||||
@pytest.fixture(autouse=True)
def mock_env_vars(monkeypatch):
    """Provide dummy Telegram credentials for every test.

    Uses pytest's ``monkeypatch`` fixture instead of assigning to
    ``os.environ`` directly: the original assignments leaked into the rest of
    the test session, while ``monkeypatch.setenv`` is undone automatically
    after each test.
    """
    monkeypatch.setenv("TELEGRAM_BOT_TOKEN", "test_token")
    monkeypatch.setenv("TELEGRAM_CHAT_ID", "test_chat_id")
|
||||
|
||||
@pytest.fixture
def mock_monitor():
    """A monitor stub with autopilot enabled and three recorded applications."""
    monitor = MagicMock()
    monitor.load_state.return_value = {"autopilot": True}
    applications = {
        "app1": {"company": "CompanyA"},
        "app2": {"company": "CompanyB"},
        "app3": {"company": "CompanyA"},
    }
    monitor.load_applications.return_value = applications
    return monitor
|
||||
|
||||
@pytest.fixture
def telegram_bot(mock_monitor):
    """A TelegramBot wired to the stub monitor with dummy credentials."""
    bot = TelegramBot(mock_monitor, bot_token="test_token", chat_id="test_chat_id")
    return bot
|
||||
|
||||
@patch("telegram_bot.requests.post")
|
||||
def test_send_message(mock_post, telegram_bot):
|
||||
mock_post.return_value.ok = True
|
||||
telegram_bot._send_message("Test message")
|
||||
mock_post.assert_called_once()
|
||||
assert mock_post.call_args[1]["json"]["text"] == "Test message"
|
||||
|
||||
@patch("telegram_bot.requests.post")
|
||||
def test_send_photo(mock_post, telegram_bot):
|
||||
mock_post.return_value.ok = True
|
||||
with patch("builtins.open", create=True):
|
||||
telegram_bot._send_photo("/path/to/photo.jpg", "Test caption")
|
||||
mock_post.assert_called_once()
|
||||
assert mock_post.call_args[1]["data"]["caption"] == "Test caption"
|
||||
|
||||
@patch("telegram_bot.TelegramBot._send_message")
|
||||
def test_handle_status_command(mock_send_message, telegram_bot):
|
||||
telegram_bot._handle_status_command()
|
||||
mock_send_message.assert_called_once()
|
||||
assert "Autopilot" in mock_send_message.call_args[0][0]
|
||||
|
||||
@patch("telegram_bot.TelegramBot._send_message")
|
||||
def test_handle_help_command(mock_send_message, telegram_bot):
|
||||
telegram_bot._handle_help_command()
|
||||
mock_send_message.assert_called_once()
|
||||
assert "InBerlin Monitor Commands" in mock_send_message.call_args[0][0]
|
||||
|
||||
@patch("telegram_bot.TelegramBot._send_message")
|
||||
def test_handle_unknown_command(mock_send_message, telegram_bot):
|
||||
telegram_bot._handle_unknown_command("/unknown")
|
||||
mock_send_message.assert_called_once()
|
||||
assert "Unknown command" in mock_send_message.call_args[0][0]
|
||||
Loading…
Add table
Add a link
Reference in a new issue