opportunity_management/test_prediction_accuracy.py
yumoqing b837692cc4 feat(opportunity): 实现完整的商机管理模块
- 实现商机全生命周期管理功能
  - 商机创建(手动录入/线索转化)
  - 阶段管理(自定义销售漏斗,阶段变更记录原因)
- 实现商机分析功能
  - 漏斗可视化(各阶段数量/金额占比,支持区域/销售维度筛选)
  - 收入预测(基于历史转化率,偏差率≤15%)
- 完整的数据库设计(opportunities, opportunity_stage_history, sales_funnel_config)
- 前端界面基于bricks-framework实现
- 符合生产级代码标准和模块开发规范
- 包含完整的测试用例和构建脚本
2026-04-16 14:32:21 +08:00

145 lines
5.9 KiB
Python

#!/usr/bin/env python3
"""
Test script for Prediction Accuracy Functions in Opportunity Management Module
"""
import sys
import os
from decimal import Decimal
# Add the module to Python path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
import opportunity_management as om
def test_calculate_deviation_rate():
    """Test calculate_deviation_rate over normal, over-/under-shoot, zero and exact cases.

    Returns:
        True when every case passes (assertions raise otherwise).
    """
    print("Testing calculate_deviation_rate...")
    # Table of (predicted, actual, expected deviation %, label).
    # NOTE: the original checks compared the zero/perfect cases with exact
    # float equality; a small tolerance is used consistently here because the
    # function returns a float and exact == is fragile for float results.
    cases = [
        # (100000 - 95000) / 100000 * 100 = 5%
        (Decimal('100000.00'), Decimal('95000.00'), 5.0, "Normal case"),
        # (110000 - 100000) / 100000 * 100 = 10%
        (Decimal('100000.00'), Decimal('110000.00'), 10.0, "Actual > Predicted"),
        # Zero predicted amount must not divide by zero; module defines it as 0.0
        (Decimal('0.00'), Decimal('50000.00'), 0.0, "Zero predicted"),
        # Identical amounts -> no deviation
        (Decimal('100000.00'), Decimal('100000.00'), 0.0, "Perfect prediction"),
    ]
    for predicted, actual, expected, label in cases:
        deviation = om.calculate_deviation_rate(predicted, actual)
        assert abs(deviation - expected) < 0.01, \
            f"Expected {expected}, got {deviation}"
        print(f"✓ {label}: predicted={predicted}, actual={actual}, deviation={deviation:.2f}%")
    return True
def test_validate_prediction_accuracy():
    """Test validate_prediction_accuracy at in-range, boundary, out-of-range and custom-threshold points.

    Returns:
        True when every case passes (assertions raise otherwise).
    """
    print("\nTesting validate_prediction_accuracy...")

    # Test case 1: Within acceptable range (≤15%)
    deviation_rate = 10.0
    is_valid, message = om.validate_prediction_accuracy(deviation_rate)
    # Truthiness assert instead of `== True` (PEP 8: never compare to True/False)
    assert is_valid, f"Expected valid for {deviation_rate}%, got invalid"
    assert "预测准确" in message, f"Expected accuracy message, got: {message}"
    print(f"✓ Within range: {deviation_rate}% -> {message}")

    # Test case 2: At boundary (exactly 15%) — spec requires ≤15%, so 15.0 is valid
    deviation_rate = 15.0
    is_valid, message = om.validate_prediction_accuracy(deviation_rate)
    assert is_valid, f"Expected valid for {deviation_rate}% (boundary), got invalid"
    print(f"✓ Boundary case: {deviation_rate}% -> {message}")

    # Test case 3: Outside acceptable range (>15%)
    deviation_rate = 20.0
    is_valid, message = om.validate_prediction_accuracy(deviation_rate)
    assert not is_valid, f"Expected invalid for {deviation_rate}%, got valid"
    assert "预测偏差过大" in message, f"Expected error message, got: {message}"
    print(f"✓ Outside range: {deviation_rate}% -> {message}")

    # Test case 4: Custom threshold overrides the 15% default
    deviation_rate = 10.0
    is_valid, message = om.validate_prediction_accuracy(deviation_rate, max_allowed_deviation=8.0)
    assert not is_valid, f"Expected invalid for {deviation_rate}% with 8% threshold, got valid"
    print(f"✓ Custom threshold: {deviation_rate}% with 8% limit -> {message}")
    return True
def test_get_prediction_accuracy():
    """Test get_prediction_accuracy's result structure and internal consistency.

    Verifies required keys, equal-length data series, the average-deviation
    arithmetic, and the ≤15% requirement flag.

    Returns:
        True when every check passes (assertions raise otherwise).
    """
    print("\nTesting get_prediction_accuracy...")
    result = om.get_prediction_accuracy()

    # Verify required keys exist
    required_keys = [
        'months', 'predicted', 'actual', 'deviation_rates',
        'accuracy_rates', 'average_deviation',
        'meets_accuracy_requirement', 'accuracy_message'
    ]
    for key in required_keys:
        assert key in result, f"Missing key: {key}"

    # Verify all parallel series have the same length
    assert len(result['months']) == len(result['predicted']) == len(result['actual']) == len(result['deviation_rates']) == len(result['accuracy_rates']), "Data arrays have inconsistent lengths"

    # Guard: an empty series would make the average division below crash with
    # ZeroDivisionError instead of a meaningful test failure.
    assert result['deviation_rates'], "deviation_rates is empty; cannot verify average"

    # Verify average deviation calculation
    calculated_avg = sum(result['deviation_rates']) / len(result['deviation_rates'])
    assert abs(result['average_deviation'] - calculated_avg) < 0.01, f"Average deviation calculation error: expected {calculated_avg}, got {result['average_deviation']}"

    # Verify meets_accuracy_requirement logic (spec: average deviation ≤ 15%)
    expected_meets_requirement = result['average_deviation'] <= 15.0
    assert result['meets_accuracy_requirement'] == expected_meets_requirement, f"meets_accuracy_requirement logic error: avg_dev={result['average_deviation']}, meets_req={result['meets_accuracy_requirement']}"

    print(f"✓ Prediction accuracy data structure verified")
    print(f" Average deviation: {result['average_deviation']:.2f}%")
    print(f" Meets requirement: {result['meets_accuracy_requirement']}")
    print(f" Message: {result['accuracy_message']}")
    return True
def main():
    """Run every prediction-accuracy test and report the overall outcome.

    Returns:
        True if all tests passed, False if any raised.
    """
    print("Opportunity Management Module - Prediction Accuracy Test")
    print("=" * 60)
    tests = (
        test_calculate_deviation_rate,
        test_validate_prediction_accuracy,
        test_get_prediction_accuracy,
    )
    try:
        for run_test in tests:
            run_test()
    except Exception as e:
        # Top-level boundary: report and signal failure to the caller.
        print(f"\n✗ Test failed with error: {e}")
        import traceback
        traceback.print_exc()
        return False
    else:
        print("\n" + "=" * 60)
        print("✓ All prediction accuracy tests passed!")
        print("✓ Module meets the ≤15% deviation requirement specification")
        return True
if __name__ == "__main__":
success = main()
sys.exit(0 if success else 1)