require 'svm_toolkit'
require 'minitest/autorun'

include SvmToolkit

class TestEvaluator < MiniTest::Test
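  # OverallAccuracy#value is the percentage of correct results (0.0 before any
  # results are added); #better_than? compares the scores of two evaluations,
  # as the assertions below show.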
  def test_evaluate_accuracy
    performance = Evaluator::OverallAccuracy.new
    assert_equal(0.0, performance.value)
    performance.add_result(0, 0)
    assert_equal(100.0, performance.value)
    performance.add_result(1, 0)
    assert_equal(50.0, performance.value)
    performance2 = Evaluator::OverallAccuracy.new
    assert_equal(true, performance.better_than?(performance2))
    performance2.add_result(1, 1)
    performance2.add_result(1, 1)
    assert_equal(false, performance.better_than?(performance2))
  end

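  # The expected values below are consistent with the geometric mean of the
  # per-class recalls: sqrt(0.5 * 1.0) ~= 0.707 and sqrt(0.5 * 2/3) ~= 0.577.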
  def test_compute_geometric_mean
    performance = Evaluator::GeometricMean.new
    performance.add_result(0, 0)
    assert_equal(1.0, performance.value)
    performance.add_result(0, 1)
    performance.add_result(1, 1)
    assert_in_delta(0.707, performance.value, 0.01)
    performance.add_result(1, 1)
    performance.add_result(1, 0)
    assert_in_delta(0.577, performance.value, 0.001)
  end

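  # ClassPrecision(c): the fraction of predictions of class c that are correct.
  # The in-test comments imply that add_result takes (actual, predicted).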
  def test_compute_class_precision
    performance1 = Evaluator::ClassPrecision(0).new
    performance1.add_result(0, 0)
    performance1.add_result(0, 0)
    performance1.add_result(0, 0)
    performance1.add_result(0, 1)
    performance1.add_result(1, 0)
    performance1.add_result(1, 0)
    performance1.add_result(1, 1)
    # 3 correct out of 5 predicted 0s
    assert_in_delta(0.6, performance1.value, 0.001)
  end

  def test_compute_class_precision_2
    performance2 = Evaluator::ClassPrecision(1).new
    performance2.add_result(0, 0)
    performance2.add_result(0, 0)
    performance2.add_result(0, 0)
    performance2.add_result(0, 1)
    performance2.add_result(1, 0)
    performance2.add_result(1, 0)
    performance2.add_result(1, 1)
    # 1 correct out of 2 predicted 1s
    assert_in_delta(0.5, performance2.value, 0.001)
  end

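  # ClassRecall(c): the fraction of actual members of class c that are
  # predicted correctly.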
  def test_compute_class_recall
    performance1 = Evaluator::ClassRecall(0).new
    performance1.add_result(0, 0)
    performance1.add_result(0, 0)
    performance1.add_result(0, 0)
    performance1.add_result(0, 1)
    performance1.add_result(1, 0)
    performance1.add_result(1, 0)
    performance1.add_result(1, 1)
    # 3 correct out of the 4 actual 0s
    assert_in_delta(0.75, performance1.value, 0.001)
  end

  def test_compute_class_recall_2
    performance2 = Evaluator::ClassRecall(1).new
    performance2.add_result(0, 0)
    performance2.add_result(0, 0)
    performance2.add_result(0, 0)
    performance2.add_result(0, 1)
    performance2.add_result(1, 0)
    performance2.add_result(1, 0)
    performance2.add_result(1, 1)
    # 1 correct out of the 3 actual 1s
    assert_in_delta(0.333, performance2.value, 0.001)
  end

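  # Matthews correlation coefficient for class 1, computed from the 2x2
  # confusion counts. For example, after (0,0), (0,1), (1,0) we have
  # TP=0, TN=1, FP=1, FN=1, so (0*1 - 1*1) / sqrt(1*1*2*2) = -0.5.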
  def test_compute_mcc
    performance = Evaluator::MatthewsCorrelationCoefficient(1).new
    performance.add_result(0, 0)
    assert_equal(0.0, performance.value)
    performance.add_result(0, 1)
    assert_equal(0.0, performance.value)
    performance.add_result(1, 0)
    assert_equal(-0.5, performance.value)
    performance.add_result(1, 1)
    assert_equal(0.0, performance.value)
    performance.add_result(1, 1)
    assert_in_delta(0.167, performance.value, 0.001)
    performance.add_result(0, 0)
    assert_in_delta(0.333, performance.value, 0.001)
  end
end
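
# A minimal usage sketch outside the test suite, assuming only the Evaluator
# API exercised above (.new, #add_result(actual, predicted), #value): score a
# small batch of predictions against their labels. The label arrays here are
# illustrative data, not part of the original tests.
if __FILE__ == $PROGRAM_NAME
  actual    = [0, 0, 1, 1, 1]
  predicted = [0, 1, 1, 1, 0]

  accuracy = Evaluator::OverallAccuracy.new
  recall_1 = Evaluator::ClassRecall(1).new
  actual.zip(predicted).each do |a, p|
    accuracy.add_result(a, p)
    recall_1.add_result(a, p)
  end

  puts "accuracy:  #{accuracy.value}%"   # 3 of 5 correct => 60.0
  puts "recall(1): #{recall_1.value}"    # 2 of 3 actual 1s => ~0.667
end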