feat: Add automatic price reduction on reposts (#691)

This commit is contained in:
Jens
2025-12-17 20:31:58 +01:00
committed by GitHub
parent 25079c32c0
commit 920ddf5533
13 changed files with 1753 additions and 22 deletions

View File

@@ -1,11 +1,18 @@
# SPDX-FileCopyrightText: © Sebastian Thomschke and contributors
# SPDX-License-Identifier: AGPL-3.0-or-later
# SPDX-ArtifactOfProjectHomePage: https://github.com/Second-Hand-Friends/kleinanzeigen-bot/
from __future__ import annotations
import math
from kleinanzeigen_bot.model.ad_model import AdPartial
import pytest
from kleinanzeigen_bot.model.ad_model import MAX_DESCRIPTION_LENGTH, Ad, AdPartial, ShippingOption, calculate_auto_price
from kleinanzeigen_bot.model.config_model import AdDefaults, AutoPriceReductionConfig
from kleinanzeigen_bot.utils.pydantics import ContextualModel, ContextualValidationError
@pytest.mark.unit
def test_update_content_hash() -> None:
minimal_ad_cfg = {
"id": "123456789",
@@ -41,6 +48,37 @@ def test_update_content_hash() -> None:
}).update_content_hash().content_hash != minimal_ad_cfg_hash
@pytest.mark.unit
def test_price_reduction_count_does_not_influence_content_hash() -> None:
    """price_reduction_count must be excluded from the content hash."""
    base_ad_cfg = {
        "id": "123456789",
        "title": "Test Ad Title",
        "category": "160",
        "description": "Test Description",
        "price_type": "NEGOTIABLE",
    }
    # Validate the same ad with two different counter values and compare hashes.
    hashes = [
        AdPartial.model_validate(base_ad_cfg | {"price_reduction_count": count}).update_content_hash().content_hash
        for count in (0, 5)
    ]
    assert hashes[0] == hashes[1]
@pytest.mark.unit
def test_repost_count_does_not_influence_content_hash() -> None:
    """repost_count must be excluded from the content hash."""
    base_ad_cfg = {
        "id": "123456789",
        "title": "Test Ad Title",
        "category": "160",
        "description": "Test Description",
        "price_type": "NEGOTIABLE",
    }
    # Validate the same ad with two different counter values and compare hashes.
    hashes = [
        AdPartial.model_validate(base_ad_cfg | {"repost_count": count}).update_content_hash().content_hash
        for count in (0, 5)
    ]
    assert hashes[0] == hashes[1]
@pytest.mark.unit
def test_shipping_costs() -> None:
minimal_ad_cfg = {
"id": "123456789",
@@ -60,3 +98,337 @@ def test_shipping_costs() -> None:
assert AdPartial.model_validate(minimal_ad_cfg | {"shipping_costs": " "}).shipping_costs is None
assert AdPartial.model_validate(minimal_ad_cfg | {"shipping_costs": None}).shipping_costs is None
assert AdPartial.model_validate(minimal_ad_cfg).shipping_costs is None
class ShippingOptionWrapper(ContextualModel):
    # Minimal model so a bare ShippingOption value can be run through
    # contextual validation in the test below.
    option:ShippingOption
@pytest.mark.unit
def test_shipping_option_must_not_be_blank() -> None:
    """A blank shipping option string is rejected during validation."""
    blank_payload = {"option": " "}
    with pytest.raises(ContextualValidationError, match = "must be non-empty and non-blank"):
        ShippingOptionWrapper.model_validate(blank_payload)
@pytest.mark.unit
def test_description_length_limit() -> None:
    """Descriptions longer than MAX_DESCRIPTION_LENGTH are rejected."""
    too_long = "x" * (MAX_DESCRIPTION_LENGTH + 1)
    cfg = {"title": "Description Length", "category": "160", "description": too_long}
    expected_msg = f"description length exceeds {MAX_DESCRIPTION_LENGTH} characters"
    with pytest.raises(ContextualValidationError, match = expected_msg):
        AdPartial.model_validate(cfg)
@pytest.fixture
def base_ad_cfg() -> dict[str, object]:
    """Minimal valid ad configuration shared by the tests below."""
    return dict(
        title = "Test Ad Title",
        category = "160",
        description = "Test Description",
        price_type = "NEGOTIABLE",
        contact = {"name": "Test User", "zipcode": "12345"},
        shipping_type = "PICKUP",
        sell_directly = False,
        type = "OFFER",
        active = True,
    )
@pytest.fixture
def complete_ad_cfg(base_ad_cfg:dict[str, object]) -> dict[str, object]:
    """base_ad_cfg extended with a price and a fully specified auto price reduction."""
    reduction_cfg = {
        "enabled": True,
        "strategy": "FIXED",
        "amount": 5,
        "min_price": 50,
        "delay_reposts": 0,
        "delay_days": 0,
    }
    return base_ad_cfg | {
        "republication_interval": 7,
        "price": 100,
        "auto_price_reduction": reduction_cfg,
    }
class SparseDumpAdPartial(AdPartial):
    """AdPartial variant whose dumps omit the repost/price-reduction counters.

    Used to verify that to_ad() re-initializes counters missing from a dump.
    """

    def model_dump(self, *args:object, **kwargs:object) -> dict[str, object]:
        dumped = super().model_dump(*args, **kwargs)  # type: ignore[arg-type]
        for counter_field in ("price_reduction_count", "repost_count"):
            dumped.pop(counter_field, None)
        return dumped
@pytest.mark.unit
def test_auto_reduce_requires_price(base_ad_cfg:dict[str, object]) -> None:
    """Enabling auto price reduction without a price must fail on to_ad()."""
    reduction = {"enabled": True, "strategy": "FIXED", "amount": 5, "min_price": 50}
    cfg = {**base_ad_cfg, "auto_price_reduction": reduction}
    partial = AdPartial.model_validate(cfg)
    with pytest.raises(ContextualValidationError, match = "price must be specified"):
        partial.to_ad(AdDefaults())
@pytest.mark.unit
def test_auto_reduce_requires_strategy(base_ad_cfg:dict[str, object]) -> None:
    """Enabling auto price reduction without a strategy must fail on to_ad()."""
    reduction = {"enabled": True, "min_price": 50}
    cfg = {**base_ad_cfg, "price": 100, "auto_price_reduction": reduction}
    partial = AdPartial.model_validate(cfg)
    with pytest.raises(ContextualValidationError, match = "strategy must be specified"):
        partial.to_ad(AdDefaults())
@pytest.mark.unit
def test_prepare_ad_model_fills_missing_counters(base_ad_cfg:dict[str, object]) -> None:
    """to_ad() initializes delay and counter fields to zero when absent."""
    cfg = {**base_ad_cfg, "price": 120, "shipping_type": "SHIPPING", "sell_directly": False}
    ad = AdPartial.model_validate(cfg).to_ad(AdDefaults())
    observed = (
        ad.auto_price_reduction.delay_reposts,
        ad.auto_price_reduction.delay_days,
        ad.price_reduction_count,
        ad.repost_count,
    )
    assert observed == (0, 0, 0, 0)
@pytest.mark.unit
def test_min_price_must_not_exceed_price(base_ad_cfg:dict[str, object]) -> None:
    """Validation rejects a min_price above the configured price."""
    reduction = {"enabled": True, "strategy": "FIXED", "amount": 5, "min_price": 120}
    cfg = {**base_ad_cfg, "price": 100, "auto_price_reduction": reduction}
    with pytest.raises(ContextualValidationError, match = "min_price must not exceed price"):
        AdPartial.model_validate(cfg)
@pytest.mark.unit
def test_min_price_validation_defers_to_pydantic_for_invalid_types(base_ad_cfg:dict[str, object]) -> None:
    """Invalid price/min_price types fall through to Pydantic's own type errors.

    The safe Decimal comparison catches conversion errors and defers to Pydantic,
    so a generic ContextualValidationError is expected — not the custom
    min_price-vs-price message.
    """
    invalid_price = {
        "price": "not_a_number",
        "auto_price_reduction": {"enabled": True, "strategy": "FIXED", "amount": 5, "min_price": 100},
    }
    invalid_min_price = {
        "price": 100,
        "auto_price_reduction": {"enabled": True, "strategy": "FIXED", "amount": 5, "min_price": "invalid"},
    }
    for overrides in (invalid_price, invalid_min_price):
        with pytest.raises(ContextualValidationError):
            AdPartial.model_validate({**base_ad_cfg, **overrides})
@pytest.mark.unit
def test_auto_reduce_requires_min_price(base_ad_cfg:dict[str, object]) -> None:
    """Enabling auto price reduction without min_price must fail on to_ad()."""
    reduction = {"enabled": True, "strategy": "FIXED", "amount": 5}
    cfg = {**base_ad_cfg, "price": 100, "auto_price_reduction": reduction}
    partial = AdPartial.model_validate(cfg)
    with pytest.raises(ContextualValidationError, match = "min_price must be specified"):
        partial.to_ad(AdDefaults())
@pytest.mark.unit
def test_to_ad_stabilizes_counters_when_defaults_omit(base_ad_cfg:dict[str, object]) -> None:
    """Counters and delays default to zero when neither ad nor defaults set them."""
    cfg = {**base_ad_cfg, "republication_interval": 7, "price": 120}
    ad = AdPartial.model_validate(cfg).to_ad(AdDefaults())
    assert (ad.auto_price_reduction.delay_reposts, ad.auto_price_reduction.delay_days) == (0, 0)
    assert (ad.price_reduction_count, ad.repost_count) == (0, 0)
@pytest.mark.unit
def test_to_ad_sets_zero_when_counts_missing_from_dump(base_ad_cfg:dict[str, object]) -> None:
    """Counters dropped from the model dump are re-initialized to zero by to_ad()."""
    cfg = {**base_ad_cfg, "republication_interval": 7, "price": 130}
    ad = SparseDumpAdPartial.model_validate(cfg).to_ad(AdDefaults())
    assert (ad.price_reduction_count, ad.repost_count) == (0, 0)
@pytest.mark.unit
def test_ad_model_auto_reduce_requires_price(complete_ad_cfg:dict[str, object]) -> None:
    """Ad validation rejects auto price reduction when the price is None."""
    with pytest.raises(ContextualValidationError, match = "price must be specified"):
        Ad.model_validate({**complete_ad_cfg, "price": None})
@pytest.mark.unit
def test_ad_model_auto_reduce_requires_strategy(complete_ad_cfg:dict[str, object]) -> None:
    """Ad validation rejects an enabled auto price reduction without a strategy."""
    cfg = {**complete_ad_cfg, "auto_price_reduction": {"enabled": True, "min_price": 50}}
    with pytest.raises(ContextualValidationError, match = "strategy must be specified"):
        Ad.model_validate(cfg)
@pytest.mark.unit
def test_price_reduction_delay_inherited_from_defaults(complete_ad_cfg:dict[str, object]) -> None:
    """Without an ad-level auto_price_reduction, the defaults' delay settings apply."""
    cfg = dict(complete_ad_cfg)
    cfg.pop("auto_price_reduction", None)  # remove so it is inherited from defaults
    default_reduction = AutoPriceReductionConfig(
        enabled = True,
        strategy = "FIXED",
        amount = 5,
        min_price = 50,
        delay_reposts = 4,
        delay_days = 0
    )
    ad = AdPartial.model_validate(cfg).to_ad(AdDefaults(auto_price_reduction = default_reduction))
    assert ad.auto_price_reduction.delay_reposts == 4
@pytest.mark.unit
def test_price_reduction_delay_override_zero(complete_ad_cfg:dict[str, object]) -> None:
    """An ad-level delay_reposts of 0 overrides a non-zero default."""
    ad_reduction = {
        "enabled": True,
        "strategy": "FIXED",
        "amount": 5,
        "min_price": 50,
        "delay_reposts": 0,
        "delay_days": 0,
    }
    cfg = {**complete_ad_cfg, "auto_price_reduction": ad_reduction}
    default_reduction = AutoPriceReductionConfig(
        enabled = True,
        strategy = "FIXED",
        amount = 5,
        min_price = 50,
        delay_reposts = 4,
        delay_days = 0
    )
    ad = AdPartial.model_validate(cfg).to_ad(AdDefaults(auto_price_reduction = default_reduction))
    assert ad.auto_price_reduction.delay_reposts == 0
@pytest.mark.unit
def test_ad_model_auto_reduce_requires_min_price(complete_ad_cfg:dict[str, object]) -> None:
    """Ad validation rejects an enabled auto price reduction without min_price."""
    reduction = {"enabled": True, "strategy": "FIXED", "amount": 5}
    cfg = {**complete_ad_cfg, "auto_price_reduction": reduction}
    with pytest.raises(ContextualValidationError, match = "min_price must be specified"):
        Ad.model_validate(cfg)
@pytest.mark.unit
def test_ad_model_min_price_must_not_exceed_price(complete_ad_cfg:dict[str, object]) -> None:
    """Ad validation rejects a min_price greater than the price."""
    reduction = {
        "enabled": True,
        "strategy": "FIXED",
        "amount": 5,
        "min_price": 150,
        "delay_reposts": 0,
        "delay_days": 0,
    }
    cfg = {**complete_ad_cfg, "price": 100, "auto_price_reduction": reduction}
    with pytest.raises(ContextualValidationError, match = "min_price must not exceed price"):
        Ad.model_validate(cfg)
@pytest.mark.unit
def test_calculate_auto_price_with_missing_strategy() -> None:
    """calculate_auto_price returns the base price when strategy is None (defensive path).

    model_construct bypasses validation so the otherwise-unreachable defensive
    branch can be exercised.
    """
    config = AutoPriceReductionConfig.model_construct(enabled = True, strategy = None, amount = None, min_price = 50)
    result = calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 1)
    assert result == 100
@pytest.mark.unit
def test_calculate_auto_price_with_missing_amount() -> None:
    """calculate_auto_price returns the base price when amount is None (defensive path).

    model_construct bypasses validation so the otherwise-unreachable defensive
    branch can be exercised.
    """
    config = AutoPriceReductionConfig.model_construct(enabled = True, strategy = "FIXED", amount = None, min_price = 50)
    result = calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 1)
    assert result == 100
@pytest.mark.unit
def test_calculate_auto_price_raises_when_min_price_none_and_enabled() -> None:
    """calculate_auto_price raises when min_price is None although enabled (defensive path)."""
    # model_construct bypasses validation to reach the defensive check.
    config = AutoPriceReductionConfig.model_construct(enabled = True, strategy = "FIXED", amount = 10, min_price = None)
    with pytest.raises(ValueError, match = "min_price must be specified when auto_price_reduction is enabled"):
        calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 1)
@pytest.mark.unit
def test_auto_price_reduction_config_requires_amount_when_enabled() -> None:
    """The config validator demands an amount whenever the feature is enabled."""
    with pytest.raises(ValueError, match = "amount must be specified when auto_price_reduction is enabled"):
        AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = None, min_price = 50)

View File

@@ -5,7 +5,7 @@ import copy, io, json, logging, os, tempfile # isort: skip
from collections.abc import Generator
from contextlib import redirect_stdout
from datetime import timedelta
from pathlib import Path
from pathlib import Path, PureWindowsPath
from typing import Any, cast
from unittest.mock import AsyncMock, MagicMock, patch
@@ -1072,6 +1072,124 @@ class TestKleinanzeigenBotShippingOptions:
# Verify the file was created in the temporary directory
assert ad_file.exists()
@pytest.mark.asyncio
async def test_cross_drive_path_fallback_windows(self, test_bot:KleinanzeigenBot, base_ad_config:dict[str, Any]) -> None:
    """Test that cross-drive path handling falls back to absolute path on Windows."""
    # Create ad config with auto price reduction enabled and one completed repost,
    # so publish_ad reaches the step that passes the ad file path onward.
    ad_cfg = Ad.model_validate(base_ad_config | {
        "updated_on": "2024-01-01T00:00:00",
        "created_on": "2024-01-01T00:00:00",
        "auto_price_reduction": {
            "enabled": True,
            "strategy": "FIXED",
            "amount": 10,
            "min_price": 50,
            "delay_reposts": 0,
            "delay_days": 0
        },
        "price": 100,
        "repost_count": 1,
        "price_reduction_count": 0
    })
    ad_cfg.update_content_hash()
    ad_cfg_orig = ad_cfg.model_dump()
    # Simulate Windows cross-drive scenario
    # Config on D:, ad file on C: — presumably the bot cannot compute a relative
    # path across drives and falls back to the absolute path (TODO confirm in publish_ad).
    test_bot.config_file_path = "D:\\project\\config.yaml"
    ad_file = "C:\\temp\\test_ad.yaml"
    # Create a sentinel exception to abort publish_ad early
    class _SentinelException(Exception):
        pass
    # Track what path argument __apply_auto_price_reduction receives
    recorded_path:list[str] = []
    def mock_apply_auto_price_reduction(ad_cfg:Ad, ad_cfg_orig:dict[str, Any], ad_file_relative:str) -> None:
        recorded_path.append(ad_file_relative)
        raise _SentinelException("Abort early for test")
    # Mock Path to use PureWindowsPath for testing cross-drive behavior
    # (PureWindowsPath yields Windows path semantics on any host OS).
    # The private method is patched via its name-mangled attribute.
    with patch("kleinanzeigen_bot.Path", PureWindowsPath), \
            patch.object(test_bot, "_KleinanzeigenBot__apply_auto_price_reduction", side_effect = mock_apply_auto_price_reduction), \
            patch.object(test_bot, "web_open", new_callable = AsyncMock), \
            patch.object(test_bot, "delete_ad", new_callable = AsyncMock):
        # Call publish_ad and expect sentinel exception
        try:
            await test_bot.publish_ad(ad_file, ad_cfg, ad_cfg_orig, [], AdUpdateStrategy.REPLACE)
            pytest.fail("Expected _SentinelException to be raised")
        except _SentinelException:
            # This is expected - the test aborts early
            pass
    # Verify the path argument is the absolute path (fallback behavior)
    assert len(recorded_path) == 1
    assert recorded_path[0] == ad_file, f"Expected absolute path fallback, got: {recorded_path[0]}"
@pytest.mark.asyncio
async def test_auto_price_reduction_only_on_replace_not_update(
    self,
    test_bot:KleinanzeigenBot,
    base_ad_config:dict[str, Any],
    tmp_path:Path
) -> None:
    """Test that auto price reduction is ONLY applied on REPLACE mode, not MODIFY."""
    # Create ad with auto price reduction enabled
    ad_cfg = Ad.model_validate(base_ad_config | {
        "id": 12345,
        "price": 200,
        "auto_price_reduction": {
            "enabled": True,
            "strategy": "FIXED",
            "amount": 50,
            "min_price": 50,
            "delay_reposts": 0,
            "delay_days": 0
        },
        "repost_count": 1,
        "price_reduction_count": 0,
        "updated_on": "2024-01-01T00:00:00",
        "created_on": "2024-01-01T00:00:00"
    })
    ad_cfg.update_content_hash()
    ad_cfg_orig = ad_cfg.model_dump()
    # Mock the private __apply_auto_price_reduction method (via its name-mangled attribute)
    with patch.object(test_bot, "_KleinanzeigenBot__apply_auto_price_reduction") as mock_apply:
        # Mock other dependencies so publish_ad can run without a real browser
        mock_response = {"statusCode": 200, "statusMessage": "OK", "content": "{}"}
        with patch.object(test_bot, "web_find", new_callable = AsyncMock), \
                patch.object(test_bot, "web_input", new_callable = AsyncMock), \
                patch.object(test_bot, "web_click", new_callable = AsyncMock), \
                patch.object(test_bot, "web_open", new_callable = AsyncMock), \
                patch.object(test_bot, "web_select", new_callable = AsyncMock), \
                patch.object(test_bot, "web_check", new_callable = AsyncMock, return_value = False), \
                patch.object(test_bot, "web_await", new_callable = AsyncMock), \
                patch.object(test_bot, "web_sleep", new_callable = AsyncMock), \
                patch.object(test_bot, "web_execute", new_callable = AsyncMock, return_value = mock_response), \
                patch.object(test_bot, "web_request", new_callable = AsyncMock, return_value = mock_response), \
                patch.object(test_bot, "web_scroll_page_down", new_callable = AsyncMock), \
                patch.object(test_bot, "web_find_all", new_callable = AsyncMock, return_value = []), \
                patch.object(test_bot, "check_and_wait_for_captcha", new_callable = AsyncMock), \
                patch("builtins.input", return_value = ""), \
                patch("kleinanzeigen_bot.utils.misc.ainput", new_callable = AsyncMock, return_value = ""):
            test_bot.page = MagicMock()
            # URL mimics the post-publish confirmation page so the bot can extract the ad id
            test_bot.page.url = "https://www.kleinanzeigen.de/p-anzeige-aufgeben-bestaetigung.html?adId=12345"
            test_bot.config.publishing.delete_old_ads = "BEFORE_PUBLISH"
            # Test REPLACE mode - should call __apply_auto_price_reduction
            await test_bot.publish_ad(str(tmp_path / "ad.yaml"), ad_cfg, ad_cfg_orig, [], AdUpdateStrategy.REPLACE)
            assert mock_apply.call_count == 1, "Auto price reduction should be called on REPLACE"
            # Reset mock
            mock_apply.reset_mock()
            # Test MODIFY mode - should NOT call __apply_auto_price_reduction
            await test_bot.publish_ad(str(tmp_path / "ad.yaml"), ad_cfg, ad_cfg_orig, [], AdUpdateStrategy.MODIFY)
            assert mock_apply.call_count == 0, "Auto price reduction should NOT be called on MODIFY"
@pytest.mark.asyncio
async def test_special_attributes_with_non_string_values(self, test_bot:KleinanzeigenBot, base_ad_config:dict[str, Any]) -> None:
"""Test that special attributes with non-string values are converted to strings."""
@@ -1462,3 +1580,45 @@ def test_file_logger_writes_message(tmp_path:Path, caplog:pytest.LogCaptureFixtu
with open(log_path, "r", encoding = "utf-8") as f:
contents = f.read()
assert "Logger test log message" in contents
class TestPriceReductionPersistence:
    """Tests for price_reduction_count persistence logic."""

    @staticmethod
    def _persisted(count:Any) -> dict[str, Any]:
        # Mirrors the persistence conditional from publish_ad (lines 1076-1079):
        # the counter is only written through when it is a positive number.
        target:dict[str, Any] = {}
        if count is not None and count > 0:
            target["price_reduction_count"] = count
        return target

    @pytest.mark.unit
    def test_persistence_logic_saves_when_count_positive(self) -> None:
        """A positive price_reduction_count is persisted."""
        persisted = self._persisted(3)
        assert "price_reduction_count" in persisted
        assert persisted["price_reduction_count"] == 3

    @pytest.mark.unit
    def test_persistence_logic_skips_when_count_zero(self) -> None:
        """A price_reduction_count of 0 is not persisted."""
        assert "price_reduction_count" not in self._persisted(0)

    @pytest.mark.unit
    def test_persistence_logic_skips_when_count_none(self) -> None:
        """A price_reduction_count of None is not persisted."""
        assert "price_reduction_count" not in self._persisted(None)

View File

@@ -0,0 +1,550 @@
# SPDX-FileCopyrightText: © Sebastian Thomschke and contributors
# SPDX-License-Identifier: AGPL-3.0-or-later
# SPDX-ArtifactOfProjectHomePage: https://github.com/Second-Hand-Friends/kleinanzeigen-bot/
import logging
from datetime import datetime, timedelta, timezone
from gettext import gettext as _
from types import SimpleNamespace
from typing import Any, Protocol, runtime_checkable
import pytest
import kleinanzeigen_bot
from kleinanzeigen_bot.model.ad_model import calculate_auto_price
from kleinanzeigen_bot.model.config_model import AutoPriceReductionConfig
from kleinanzeigen_bot.utils.pydantics import ContextualValidationError
@runtime_checkable
class _ApplyAutoPriceReduction(Protocol):
    # Structural type of the module-level apply_auto_price_reduction function,
    # giving the fixture below a precise callable signature.
    def __call__(self, ad_cfg:SimpleNamespace, ad_cfg_orig:dict[str, Any], ad_file_relative:str) -> None:
        ...
@pytest.fixture
def apply_auto_price_reduction() -> _ApplyAutoPriceReduction:
    """Expose the module-level apply_auto_price_reduction function (no name-mangling needed)."""
    fn = kleinanzeigen_bot.apply_auto_price_reduction
    return fn  # type: ignore[return-value]
@pytest.mark.unit
def test_initial_posting_uses_base_price() -> None:
    """Cycle 0 (initial posting) leaves the base price untouched."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 50)
    price = calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 0)
    assert price == 100
@pytest.mark.unit
def test_auto_price_returns_none_without_base_price() -> None:
    """Without a base price there is nothing to reduce, so None is returned."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 10)
    price = calculate_auto_price(base_price = None, auto_price_reduction = config, target_reduction_cycle = 3)
    assert price is None
@pytest.mark.unit
def test_negative_price_reduction_count_is_treated_like_zero() -> None:
    """A negative reduction cycle behaves like cycle 0: no reduction."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 50)
    price = calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = -3)
    assert price == 100
@pytest.mark.unit
def test_missing_price_reduction_returns_base_price() -> None:
    """Without a reduction config the base price passes through unchanged."""
    price = calculate_auto_price(base_price = 150, auto_price_reduction = None, target_reduction_cycle = 4)
    assert price == 150
@pytest.mark.unit
def test_percentage_reduction_on_float_rounds_half_up() -> None:
    """Fractional percentage results are rounded half-up to a whole price."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 12.5, min_price = 50)
    price = calculate_auto_price(base_price = 99.99, auto_price_reduction = config, target_reduction_cycle = 1)
    assert price == 87
@pytest.mark.unit
def test_fixed_reduction_on_float_rounds_half_up() -> None:
    """Fractional fixed-amount results are rounded half-up to a whole price."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 12.4, min_price = 50)
    price = calculate_auto_price(base_price = 80.51, auto_price_reduction = config, target_reduction_cycle = 1)
    assert price == 68
@pytest.mark.unit
def test_percentage_price_reduction_over_time() -> None:
    """Successive cycles compound the 10% reduction (rounded to whole prices)."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 50)
    for cycle, expected in ((1, 90), (2, 81), (3, 73)):
        assert calculate_auto_price(
            base_price = 100,
            auto_price_reduction = config,
            target_reduction_cycle = cycle
        ) == expected
@pytest.mark.unit
def test_fixed_price_reduction_over_time() -> None:
    """Each cycle subtracts the fixed amount of 15 from the base price."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 15, min_price = 50)
    for cycle, expected in ((1, 85), (2, 70), (3, 55)):
        assert calculate_auto_price(
            base_price = 100,
            auto_price_reduction = config,
            target_reduction_cycle = cycle
        ) == expected
@pytest.mark.unit
def test_min_price_boundary_is_respected() -> None:
    """Repeated reductions never drop the price below min_price."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 20, min_price = 50)
    price = calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 5)
    assert price == 50
@pytest.mark.unit
def test_min_price_zero_is_allowed() -> None:
    """A min_price of 0 is a valid floor: the price may reach exactly zero."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 5, min_price = 0)
    price = calculate_auto_price(base_price = 20, auto_price_reduction = config, target_reduction_cycle = 5)
    assert price == 0
@pytest.mark.unit
def test_missing_min_price_raises_error() -> None:
    """min_price validation happens at config initialization when enabled is True."""
    payload = {"enabled": True, "strategy": "PERCENTAGE", "amount": 50, "min_price": None}
    with pytest.raises(ContextualValidationError, match = "min_price must be specified"):
        AutoPriceReductionConfig.model_validate(payload)
@pytest.mark.unit
def test_percentage_above_100_raises_error() -> None:
    """A percentage reduction above 100 is rejected at validation time."""
    payload = {"enabled": True, "strategy": "PERCENTAGE", "amount": 150, "min_price": 50}
    with pytest.raises(ContextualValidationError, match = "Percentage reduction amount must not exceed 100"):
        AutoPriceReductionConfig.model_validate(payload)
@pytest.mark.unit
def test_feature_disabled_path_leaves_price_unchanged() -> None:
    """With enabled=False the calculator never touches the price."""
    config = AutoPriceReductionConfig(enabled = False, strategy = "PERCENTAGE", amount = 25, min_price = 50)
    assert calculate_auto_price(base_price = 100, auto_price_reduction = config, target_reduction_cycle = 4) == 100
@pytest.mark.unit
def test_apply_auto_price_reduction_logs_drop(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """A successful reduction updates the price, bumps the counter and logs it."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 50, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 200,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.INFO):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_test.yaml")
    expected = _("Auto price reduction applied: %s -> %s after %s reduction cycles") % (200, 150, 1)
    assert any(expected in message for message in caplog.messages)
    assert (ad_cfg.price, ad_cfg.price_reduction_count) == (150, 1)
    # The counter is only persisted to the original config after a successful publish.
    assert "price_reduction_count" not in ad_orig
@pytest.mark.unit
def test_apply_auto_price_reduction_logs_unchanged_price_at_floor(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """A reduction that would undershoot min_price is clamped to the floor and still logged."""
    # 95 - 10 = 85 would undershoot min_price = 90, so the effective price is
    # clamped to 90 — the reduction counts as applied.
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "FIXED", amount = 10, min_price = 90, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 95,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.INFO):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_test.yaml")
    expected = _("Auto price reduction applied: %s -> %s after %s reduction cycles") % (95, 90, 1)
    assert any(expected in message for message in caplog.messages)
    assert (ad_cfg.price, ad_cfg.price_reduction_count) == (90, 1)
    assert "price_reduction_count" not in ad_orig
@pytest.mark.unit
def test_apply_auto_price_reduction_warns_when_price_missing(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """An enabled reduction without a configured price only produces a warning."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 10, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = None,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 2,
        repost_count = 2
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.WARNING):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_warning.yaml")
    expected = _("Auto price reduction is enabled for [%s] but no price is configured.") % ("ad_warning.yaml",)
    assert any(expected in message for message in caplog.messages)
    assert ad_cfg.price is None
@pytest.mark.unit
def test_apply_auto_price_reduction_warns_when_min_price_equals_price(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """When min_price equals price, a warning is logged and nothing changes."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 100, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 100,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.WARNING):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_equal_prices.yaml")
    expected = _("Auto price reduction is enabled for [%s] but min_price equals price (%s) - no reductions will occur.") % ("ad_equal_prices.yaml", 100)
    assert any(expected in message for message in caplog.messages)
    assert (ad_cfg.price, ad_cfg.price_reduction_count) == (100, 0)
@pytest.mark.unit
def test_apply_auto_price_reduction_respects_repost_delay(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """With delay_reposts = 3 and only 2 completed reposts the price stays untouched."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 50, delay_reposts = 3, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 200,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 2
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.INFO):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_delay.yaml")
    assert ad_cfg.price == 200
    delayed_message = _("Auto price reduction delayed for [%s]: waiting %s more reposts (completed %s, applied %s reductions)") % ("ad_delay.yaml", 2, 2, 0)
    assert any(delayed_message in message for message in caplog.messages)
@pytest.mark.unit
def test_apply_auto_price_reduction_after_repost_delay_reduces_once(
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """Once the repost delay is satisfied, exactly one reduction is applied."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 50, delay_reposts = 2, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 100,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 3
    )
    ad_cfg_orig:dict[str, Any] = {}
    apply_auto_price_reduction(ad_cfg, ad_cfg_orig, "ad_after_delay.yaml")
    assert (ad_cfg.price, ad_cfg.price_reduction_count) == (90, 1)
    # The counter is only persisted to the original config after a successful publish.
    assert "price_reduction_count" not in ad_cfg_orig
@pytest.mark.unit
def test_apply_auto_price_reduction_waits_when_reduction_already_applied(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """No further reduction happens when the applied count already matches eligible reposts."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 50, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 100,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 3,
        repost_count = 3
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.DEBUG):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_already.yaml")
    expected = _("Auto price reduction already applied for [%s]: %s reductions match %s eligible reposts") % ("ad_already.yaml", 3, 3)
    assert any(expected in message for message in caplog.messages)
    assert (ad_cfg.price, ad_cfg.price_reduction_count) == (100, 3)
    assert "price_reduction_count" not in ad_orig
@pytest.mark.unit
def test_apply_auto_price_reduction_respects_day_delay(
    monkeypatch:pytest.MonkeyPatch,
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """With delay_days = 3 and only one elapsed day the reduction is deferred."""
    published_at = datetime(2025, 1, 1, tzinfo = timezone.utc)
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 50, delay_reposts = 0, delay_days = 3
    )
    ad_cfg = SimpleNamespace(
        price = 150,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1,
        updated_on = published_at,
        created_on = published_at
    )
    # Freeze "now" to one day after publication.
    monkeypatch.setattr("kleinanzeigen_bot.misc.now", lambda: published_at + timedelta(days = 1))
    ad_orig:dict[str, Any] = {}
    with caplog.at_level("INFO"):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_delay_days.yaml")
    assert ad_cfg.price == 150
    delayed_message = _("Auto price reduction delayed for [%s]: waiting %s days (elapsed %s)") % ("ad_delay_days.yaml", 3, 1)
    assert any(delayed_message in message for message in caplog.messages)
@pytest.mark.unit
def test_apply_auto_price_reduction_runs_after_delays(
    monkeypatch:pytest.MonkeyPatch,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """When both the repost and the day delay are satisfied, the reduction is applied."""
    frozen_now = datetime(2025, 1, 1, tzinfo = timezone.utc)
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "PERCENTAGE", amount = 25, min_price = 60, delay_reposts = 2, delay_days = 3
    )
    ad_cfg = SimpleNamespace(
        price = 120,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 3,
        updated_on = frozen_now - timedelta(days = 5),
        created_on = frozen_now - timedelta(days = 10)
    )
    monkeypatch.setattr("kleinanzeigen_bot.misc.now", lambda: frozen_now)
    ad_orig:dict[str, Any] = {}
    apply_auto_price_reduction(ad_cfg, ad_orig, "ad_ready.yaml")
    assert ad_cfg.price == 90
@pytest.mark.unit
def test_apply_auto_price_reduction_delayed_when_timestamp_missing(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """Without any publish timestamp a day-based delay cannot be evaluated, so the price is kept."""
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "FIXED", amount = 20, min_price = 50, delay_reposts = 0, delay_days = 2
    )
    ad_cfg = SimpleNamespace(
        price = 200,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1,
        updated_on = None,  # neither timestamp is available
        created_on = None
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level("INFO"):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_missing_time.yaml")
    delay_notice = _("Auto price reduction delayed for [%s]: waiting %s days but publish timestamp missing") % ("ad_missing_time.yaml", 2)
    assert any(delay_notice in log_line for log_line in caplog.messages)
@pytest.mark.unit
def test_fractional_reduction_increments_counter_even_when_price_unchanged(
    caplog:pytest.LogCaptureFixture,
    apply_auto_price_reduction:_ApplyAutoPriceReduction
) -> None:
    """A fractional reduction that rounds back to the old price still counts as a cycle.

    This lets several small reductions accumulate until they eventually
    produce a visible price change.
    """
    reduction_cfg = AutoPriceReductionConfig(
        enabled = True, strategy = "FIXED", amount = 0.3, min_price = 50, delay_reposts = 0, delay_days = 0
    )
    ad_cfg = SimpleNamespace(
        price = 100,
        auto_price_reduction = reduction_cfg,
        price_reduction_count = 0,
        repost_count = 1
    )
    ad_orig:dict[str, Any] = {}
    with caplog.at_level(logging.INFO):
        apply_auto_price_reduction(ad_cfg, ad_orig, "ad_fractional.yaml")
    # 100 - 0.3 = 99.7 rounds back to 100, i.e. no visible price change...
    kept_notice = _("Auto price reduction kept price %s after attempting %s reduction cycles") % (100, 1)
    assert any(kept_notice in log_line for log_line in caplog.messages)
    assert ad_cfg.price == 100
    # ...yet the cycle counter advances so future reductions can accumulate
    assert ad_cfg.price_reduction_count == 1
    assert "price_reduction_count" not in ad_orig
@pytest.mark.unit
def test_reduction_value_zero_raises_error() -> None:
    """A reduction amount of exactly 0 is rejected by model validation."""
    raw_cfg = {"enabled": True, "strategy": "PERCENTAGE", "amount": 0, "min_price": 50}
    with pytest.raises(ContextualValidationError, match = "Input should be greater than 0"):
        AutoPriceReductionConfig.model_validate(raw_cfg)
@pytest.mark.unit
def test_reduction_value_negative_raises_error() -> None:
    """A negative reduction amount is rejected by model validation."""
    raw_cfg = {"enabled": True, "strategy": "FIXED", "amount": -5, "min_price": 50}
    with pytest.raises(ContextualValidationError, match = "Input should be greater than 0"):
        AutoPriceReductionConfig.model_validate(raw_cfg)
@pytest.mark.unit
def test_percentage_reduction_100_percent() -> None:
    """A 100% percentage reduction drops the price straight to 0 when min_price allows it."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 100, min_price = 0)
    reduced_price = calculate_auto_price(
        base_price = 150,
        auto_price_reduction = config,
        target_reduction_cycle = 1
    )
    assert reduced_price == 0
@pytest.mark.unit
def test_extreme_reduction_cycles() -> None:
    """Very large cycle counts terminate without errors or performance issues.

    With commercial rounding applied after each step the price converges:
    once it reaches 5, the step 5 * 0.9 = 4.5 rounds back to 5 (ROUND_HALF_UP),
    so further cycles change nothing.
    """
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 0)
    assert calculate_auto_price(
        base_price = 1000,
        auto_price_reduction = config,
        target_reduction_cycle = 100
    ) == 5
@pytest.mark.unit
def test_commercial_rounding_each_step() -> None:
    """Commercial rounding is applied after every reduction step, not once at the end."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 0)
    stepwise_price = calculate_auto_price(
        base_price = 135,
        auto_price_reduction = config,
        target_reduction_cycle = 2
    )
    # step 1: 135 * 0.9 = 121.5 -> rounds to 122 EUR
    # step 2: 122 * 0.9 = 109.8 -> rounds to 110 EUR
    # a single end-of-run rounding would yield 135 * 0.9^2 = 109.35 -> 109 EUR instead
    assert stepwise_price == 110
@pytest.mark.unit
def test_extreme_reduction_cycles_with_floor() -> None:
    """Even absurd cycle counts clamp at min_price instead of drifting toward 0."""
    config = AutoPriceReductionConfig(enabled = True, strategy = "PERCENTAGE", amount = 10, min_price = 50)
    floored_price = calculate_auto_price(
        base_price = 1000,
        auto_price_reduction = config,
        target_reduction_cycle = 1000
    )
    assert floored_price == 50
@pytest.mark.unit
def test_fractional_min_price_is_rounded_up_with_ceiling() -> None:
    """Fractional min_price values are rounded UP (ROUND_CEILING) before clamping.

    Truncating via int() would let the final price fall below the configured
    floor; e.g. min_price = 90.5 must act as a floor of 91, not 90.
    """
    # 100 - 10 = 90, but ceil(90.5) = 91 wins as the effective floor
    config = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 10, min_price = 90.5)
    assert calculate_auto_price(
        base_price = 100,
        auto_price_reduction = config,
        target_reduction_cycle = 1
    ) == 91
    # second fractional floor: 60 - 3 * 5 = 45, clamped to ceil(49.1) = 50
    config2 = AutoPriceReductionConfig(enabled = True, strategy = "FIXED", amount = 5, min_price = 49.1)
    assert calculate_auto_price(
        base_price = 60,
        auto_price_reduction = config2,
        target_reduction_cycle = 3
    ) == 50

View File

@@ -151,6 +151,14 @@ class TestFormatValidationError:
[{"loc": ("decimal_max_digits",), "msg": "dummy", "type": "decimal_max_digits", "ctx": {"max_digits": 10, "expected_plural": "s"}}],
"Decimal input should have no more than 10 digits in total",
),
(
[{"loc": ("decimal_max_places",), "msg": "dummy", "type": "decimal_max_places", "ctx": {"decimal_places": 2, "expected_plural": "s"}}],
"Decimal input should have no more than 2 decimal places",
),
(
[{"loc": ("decimal_whole_digits",), "msg": "dummy", "type": "decimal_whole_digits", "ctx": {"whole_digits": 3, "expected_plural": ""}}],
"Decimal input should have no more than 3 digits before the decimal point",
),
# Complex number related errors
(
[{"loc": ("complex",), "msg": "dummy", "type": "complex_type"}],