Package mvpa :: Package tests :: Module test_regr
[hide private]
[frames] | [no frames]

Source Code for Module mvpa.tests.test_regr

  1  # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 
  2  # vi: set ft=python sts=4 ts=4 sw=4 et: 
  3  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 
  4  # 
  5  #   See COPYING file distributed along with the PyMVPA package for the 
  6  #   copyright and license terms. 
  7  # 
  8  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 
  9  """Unit tests for PyMVPA Regressions""" 
 10   
 11  from mvpa.base import externals 
 12  from mvpa.support.copy import deepcopy 
 13   
 14  from mvpa.datasets import Dataset 
 15  from mvpa.mappers.mask import MaskMapper 
 16  from mvpa.datasets.splitters import NFoldSplitter, OddEvenSplitter 
 17   
 18  from mvpa.misc.errorfx import RMSErrorFx, RelativeRMSErrorFx, \ 
 19       CorrErrorFx, CorrErrorPFx 
 20   
 21  from mvpa.clfs.meta import SplitClassifier 
 22  from mvpa.clfs.transerror import TransferError 
 23  from mvpa.misc.exceptions import UnknownStateError 
 24   
 25  from mvpa.algorithms.cvtranserror import CrossValidatedTransferError 
 26   
 27  from tests_warehouse import * 
 28  from tests_warehouse_clfs import * 
 29   
class RegressionsTests(unittest.TestCase):
    """Unit tests exercising PyMVPA regression learners and the
    regression-capable classifiers from the warehouse collections."""

    @sweepargs(ml=clfswh['regression']+regrswh[:])
    def testNonRegressions(self, ml):
        """Test if binary regression-based classifiers have proper tag.

        A learner must be tagged either 'binary' or regression, never
        both and never neither -- the XOR check below enforces that.
        """
        self.failUnless(('binary' in ml._clf_internals) != ml.regression,
            msg="Inconsistent markin with binary and regression features"
                " detected in %s having %s" % (ml, `ml._clf_internals`))

    @sweepargs(regr=regrswh['regression'])
    def testRegressions(self, regr):
        """Simple tests on regressions.

        Runs each warehouse regression through cross-validation and a
        SplitClassifier on a simple linear dataset, then sanity-checks
        the resulting confusion/summary statistics.
        """
        # Simple (close to linear) dataset -- regressions should do well here
        ds = datasets['chirp_linear']

        # N-fold cross-validation with correlation error as the error function
        cve = CrossValidatedTransferError(
            TransferError(regr, CorrErrorFx()),
            splitter=NFoldSplitter(),
            enable_states=['training_confusion', 'confusion'])
        corr = cve(ds)

        # Returned error must agree with the CCe stat of the accumulated confusion
        self.failUnless(corr == cve.confusion.stats['CCe'])

        # Same regression wrapped into a SplitClassifier over odd/even splits
        splitregr = SplitClassifier(regr,
            splitter=OddEvenSplitter(),
            enable_states=['training_confusion', 'confusion'])
        splitregr.train(ds)
        split_corr = splitregr.confusion.stats['CCe']
        split_corr_tr = splitregr.training_confusion.stats['CCe']

        # Check each confusion/error pair collected above
        for confusion, error in ((cve.confusion, corr),
                                 (splitregr.confusion, split_corr),
                                 (splitregr.training_confusion, split_corr_tr),
                                 ):
            #TODO: test confusion statistics
            # Part of it for now -- CCe
            for conf in confusion.summaries:
                stats = conf.stats
                # Correlation error below chance level for every summary
                self.failUnless(stats['CCe'] < 0.5)
                self.failUnlessEqual(stats['CCe'], stats['Summary CCe'])

            # Both short and long string renderings should be non-trivial
            s0 = confusion.asstring(short=True)
            s1 = confusion.asstring(short=False)

            for s in [s0, s1]:
                self.failUnless(len(s) > 10,
                    msg="We should get some string representation "
                        "of regression summary. Got %s" % s)

            self.failUnless(error < 0.2,
                msg="Regressions should perform well on a simple "
                    "dataset. Got correlation error of %s " % error)

            # Test access to summary statistics
            # YOH: lets start making testing more reliable.
            # p-value for such accident to have is verrrry tiny,
            # so if regression works -- it better has at least 0.5 ;)
            # otherwise fix it! ;)
            #if cfg.getboolean('tests', 'labile', default='yes'):
            self.failUnless(confusion.stats['CCe'] < 0.5)

        split_predictions = splitregr.predict(ds.samples)  # just to check if it works fine

        # To test basic plotting
        #import pylab as P
        #cve.confusion.plot()
        #P.show()

    @sweepargs(clf=clfswh['regression'])
    def testRegressionsClassifiers(self, clf):
        """Simple tests on regressions being used as classifiers.

        Verifies that the 'values' state is populated only after
        prediction and matches the number of test samples.
        """
        # check if we get values set correctly
        clf.states._changeTemporarily(enable_states=['values'])
        # 'values' must be unset before any prediction happened
        self.failUnlessRaises(UnknownStateError, clf.states['values']._get)
        cv = CrossValidatedTransferError(
            TransferError(clf),
            NFoldSplitter(),
            enable_states=['confusion', 'training_confusion'])
        ds = datasets['uni2small']
        cverror = cv(ds)
        # After CV the last fold's predictions should match the size of
        # the chunk used for testing
        self.failUnless(len(clf.values) == ds['chunks', 1].nsamples)
        clf.states._resetEnabledTemporarily()
def suite():
    """Assemble and return the unittest suite for this module."""
    regression_suite = unittest.makeSuite(RegressionsTests)
    return regression_suite
119 120 121 if __name__ == '__main__': 122 import runner 123