
Source Code for Module mvpa.datasets.base

   1  # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*- 
   2  # vi: set ft=python sts=4 ts=4 sw=4 et: 
   3  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 
   4  # 
   5  #   See COPYING file distributed along with the PyMVPA package for the 
   6  #   copyright and license terms. 
   7  # 
   8  ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ## 
   9  """Dataset container""" 
  10   
  11  __docformat__ = 'restructuredtext' 
  12   
  13  import operator 
  14  import random 
  15  import mvpa.support.copy as copy 
  16  import numpy as N 
  17   
  18  from sets import Set 
  19   
   20  # Sooner or later Dataset will become a ClassWithCollections as well, but for 
   21  # now it remains a plain object -- thus the tentative changes are commented out 
  22  # 
  23  #XXX from mvpa.misc.state import ClassWithCollections, SampleAttribute 
  24   
  25  from mvpa.misc.exceptions import DatasetError 
  26  from mvpa.misc.support import idhash as idhash_ 
  27  from mvpa.base.dochelpers import enhancedDocString, table2string 
  28   
  29  from mvpa.base import warning 
  30   
  31  if __debug__: 
  32      from mvpa.base import debug 
  33   
34 - def _validate_indexes_uniq_sorted(seq, fname, item):
  35      """Helper function to validate that seq contains unique sorted values
  36      """
  37      if operator.isSequenceType(seq):
  38          seq_unique = N.unique(seq)
  39          if len(seq) != len(seq_unique):
  40              warning("%s() operates only with indexes for %s without"
  41                      " repetitions. Repetitions were removed."
  42                      % (fname, item))
  43          if N.any(N.sort(seq) != seq_unique):
  44              warning("%s() does not guarantee the original order"
  45                      " of selected %ss. Use selectSamples() and "
  46                      " selectFeatures(sort=False) instead" % (fname, item))
  47 
  48 
  49  #XXX class Dataset(ClassWithCollections):
50 -class Dataset(object):
51 """*The* Dataset. 52 53 This class provides a container to store all necessary data to 54 perform MVPA analyses. These are the data samples, as well as the 55 labels associated with the samples. Additionally, samples can be 56 grouped into chunks. 57 58 :Groups: 59 - `Creators`: `__init__`, `selectFeatures`, `selectSamples`, 60 `applyMapper` 61 - `Mutators`: `permuteLabels` 62 63 Important: labels assumed to be immutable, i.e. no one should modify 64 them externally by accessing indexed items, ie something like 65 ``dataset.labels[1] += 100`` should not be used. If a label has 66 to be modified, full copy of labels should be obtained, operated on, 67 and assigned back to the dataset, otherwise dataset.uniquelabels 68 would not work. The same applies to any other attribute which has 69 corresponding unique* access property. 70 71 """ 72 # XXX Notes about migration to use Collections to store data and 73 # attributes for samples, features, and dataset itself: 74 75 # changes: 76 # _data -> s_attr collection (samples attributes) 77 # _dsattr -> ds_attr collection 78 # f_attr collection (features attributes) 79 80 # static definition to track which unique attributes 81 # have to be reset/recomputed whenever anything relevant 82 # changes 83 84 # unique{labels,chunks} become a part of dsattr 85 _uniqueattributes = [] 86 """Unique attributes associated with the data""" 87 88 _registeredattributes = [] 89 """Registered attributes (stored in _data)""" 90 91 _requiredattributes = ['samples', 'labels'] 92 """Attributes which have to be provided to __init__, or otherwise 93 no default values would be assumed and construction of the 94 instance would fail""" 95 96 #XXX _ATTRIBUTE_COLLECTIONS = [ 's_attr', 'f_attr', 'ds_attr' ] 97 #XXX """Assure those 3 collections to be present in all datasets""" 98 #XXX 99 #XXX samples__ = SampleAttribute(doc="Samples data. 0th index is time", hasunique=False) # XXX 100 #XXX labels__ = SampleAttribute(doc="Labels for the samples", hasunique=True) 101 #XXX chunks__ = SampleAttribute(doc="Chunk identities for the samples", hasunique=True) 102 #XXX # samples ids (already unique by definition) 103 #XXX origids__ = SampleAttribute(doc="Chunk identities for the samples", hasunique=False) 104
 105 - def __init__(self,
 106                 # for copy constructor
 107                 data=None,
 108                 dsattr=None,
 109                 # automatic dtype conversion
 110                 dtype=None,
 111                 # new instances
 112                 samples=None,
 113                 labels=None,
 114                 labels_map=None,
 115                 chunks=None,
 116                 origids=None,
 117                 # flags
 118                 check_data=True,
 119                 copy_samples=False,
 120                 copy_data=True,
 121                 copy_dsattr=True):
122 """Initialize dataset instance 123 124 There are basically two different way to create a dataset: 125 126 1. Create a new dataset from samples and sample attributes. In 127 this mode a two-dimensional `ndarray` has to be passed to the 128 `samples` keyword argument and the corresponding samples 129 attributes are provided via the `labels` and `chunks` 130 arguments. 131 132 2. Copy contructor mode 133 The second way is used internally to perform quick coyping 134 of datasets, e.g. when performing feature selection. In this 135 mode and the two dictionaries (`data` and `dsattr`) are 136 required. For performance reasons this mode bypasses most of 137 the sanity check performed by the previous mode, as for 138 internal operations data integrity is assumed. 139 140 141 :Parameters: 142 data : dict 143 Dictionary with an arbitrary number of entries. The value for 144 each key in the dict has to be an ndarray with the 145 same length as the number of rows in the samples array. 146 A special entry in this dictionary is 'samples', a 2d array 147 (samples x features). A shallow copy is stored in the object. 148 dsattr : dict 149 Dictionary of dataset attributes. An arbitrary number of 150 arbitrarily named and typed objects can be stored here. A 151 shallow copy of the dictionary is stored in the object. 152 dtype: type | None 153 If None -- do not change data type if samples 154 is an ndarray. Otherwise convert samples to dtype. 155 156 157 :Keywords: 158 samples : ndarray 159 2d array (samples x features) 160 labels 161 An array or scalar value defining labels for each samples. 162 Generally `labels` should be numeric, unless `labels_map` 163 is used 164 labels_map : None or bool or dict 165 Map original labels into numeric labels. If True, the 166 mapping is computed if labels are literal. If is False, 167 no mapping is computed. If dict instance -- provided 168 mapping is verified and applied. If you want to have 169 labels_map just be present given already numeric labels, 170 just assign labels_map dictionary to existing dataset 171 instance 172 chunks 173 An array or scalar value defining chunks for each sample 174 175 Each of the Keywords arguments overwrites what is/might be 176 already in the `data` container. 177 178 """ 179 180 #XXX ClassWithCollections.__init__(self) 181 182 # see if data and dsattr are none, if so, make them empty dicts 183 if data is None: 184 data = {} 185 if dsattr is None: 186 dsattr = {} 187 188 # initialize containers; default values are empty dicts 189 # always make a shallow copy of what comes in, otherwise total chaos 190 # is likely to happen soon 191 if copy_data: 192 # deep copy (cannot use copy.deepcopy, because samples is an 193 # exception 194 # but shallow copy first to get a shared version of the data in 195 # any case 196 lcl_data = data.copy() 197 for k, v in data.iteritems(): 198 # skip copying samples if requested 199 if k == 'samples' and not copy_samples: 200 continue 201 lcl_data[k] = v.copy() 202 else: 203 # shallow copy 204 # XXX? 
yoh: it might be better speed wise just assign dictionary 205 # without any shallow .copy 206 lcl_data = data.copy() 207 208 if copy_dsattr and len(dsattr)>0: 209 # deep copy 210 if __debug__: 211 debug('DS', "Deep copying dsattr %s" % `dsattr`) 212 lcl_dsattr = copy.deepcopy(dsattr) 213 214 else: 215 # shallow copy 216 lcl_dsattr = copy.copy(dsattr) 217 218 # has to be not private since otherwise derived methods 219 # would have problem accessing it and _registerAttribute 220 # would fail on lambda getters 221 self._data = lcl_data 222 """What makes a dataset.""" 223 224 self._dsattr = lcl_dsattr 225 """Dataset attriibutes.""" 226 227 # store samples (and possibly transform/reshape/retype them) 228 if not samples is None: 229 if __debug__: 230 if lcl_data.has_key('samples'): 231 debug('DS', 232 "`Data` dict has `samples` (%s) but there is also" \ 233 " __init__ parameter `samples` which overrides " \ 234 " stored in `data`" % (`lcl_data['samples'].shape`)) 235 lcl_data['samples'] = self._shapeSamples(samples, dtype, 236 copy_samples) 237 238 # TODO? we might want to have the same logic for chunks and labels 239 # ie if no labels present -- assign arange 240 # MH: don't think this is necessary -- or is there a use case? 241 # labels 242 if not labels is None: 243 if __debug__: 244 if lcl_data.has_key('labels'): 245 debug('DS', 246 "`Data` dict has `labels` (%s) but there is also" + 247 " __init__ parameter `labels` which overrides " + 248 " stored in `data`" % (`lcl_data['labels']`)) 249 if lcl_data.has_key('samples'): 250 lcl_data['labels'] = \ 251 self._expandSampleAttribute(labels, 'labels') 252 253 # check if we got all required attributes 254 for attr in self._requiredattributes: 255 if not lcl_data.has_key(attr): 256 raise DatasetError, \ 257 "Attribute %s is required to initialize dataset" % \ 258 attr 259 260 nsamples = self.nsamples 261 262 # chunks 263 if not chunks == None: 264 lcl_data['chunks'] = \ 265 self._expandSampleAttribute(chunks, 'chunks') 266 elif not lcl_data.has_key('chunks'): 267 # if no chunk information is given assume that every pattern 268 # is its own chunk 269 lcl_data['chunks'] = N.arange(nsamples) 270 271 # samples origids 272 if not origids is None: 273 # simply assign if provided 274 lcl_data['origids'] = origids 275 elif not lcl_data.has_key('origids'): 276 # otherwise contruct unqiue ones 277 lcl_data['origids'] = N.arange(len(lcl_data['labels'])) 278 else: 279 # assume origids have been specified already (copy constructor 280 # mode) leave them as they are, e.g. to make origids survive 281 # selectSamples() 282 pass 283 284 # Initialize attributes which are registered but were not setup 285 for attr in self._registeredattributes: 286 if not lcl_data.has_key(attr): 287 if __debug__: 288 debug("DS", "Initializing attribute %s" % attr) 289 lcl_data[attr] = N.zeros(nsamples) 290 291 # labels_map 292 labels_ = N.asarray(lcl_data['labels']) 293 labels_map_known = lcl_dsattr.has_key('labels_map') 294 if labels_map is True: 295 # need to compose labels_map 296 if labels_.dtype.char == 'S': # or not labels_map_known: 297 # Create mapping 298 ulabels = list(Set(labels_)) 299 ulabels.sort() 300 labels_map = dict([ (x[1], x[0]) for x in enumerate(ulabels) ]) 301 if __debug__: 302 debug('DS', 'Mapping for the labels computed to be %s' 303 % labels_map) 304 else: 305 if __debug__: 306 debug('DS', 'Mapping of labels was requested but labels ' 307 'are not strings. 
Skipped') 308 labels_map = None 309 pass 310 elif labels_map is False: 311 labels_map = None 312 313 if isinstance(labels_map, dict): 314 if labels_map_known: 315 if __debug__: 316 debug('DS', 317 "`dsattr` dict has `labels_map` (%s) but there is also" \ 318 " __init__ parameter `labels_map` (%s) which overrides " \ 319 " stored in `dsattr`" % (lcl_dsattr['labels_map'], labels_map)) 320 321 lcl_dsattr['labels_map'] = labels_map 322 # map labels if needed (if strings or was explicitely requested) 323 if labels_.dtype.char == 'S' or not labels_map_known: 324 if __debug__: 325 debug('DS_', "Remapping labels using mapping %s" % labels_map) 326 # need to remap 327 # !!! N.array is important here 328 try: 329 lcl_data['labels'] = N.array( 330 [labels_map[x] for x in lcl_data['labels']]) 331 except KeyError, e: 332 raise ValueError, "Provided labels_map %s is insufficient " \ 333 "to map all the labels. Mapping for label %s is " \ 334 "missing" % (labels_map, e) 335 336 elif not lcl_dsattr.has_key('labels_map'): 337 lcl_dsattr['labels_map'] = labels_map 338 elif __debug__: 339 debug('DS_', 'Not overriding labels_map in dsattr since it has one') 340 341 if check_data: 342 self._checkData() 343 344 # lazy computation of unique members 345 #self._resetallunique('_dsattr', self._dsattr) 346 347 # Michael: we cannot do this conditional here. When selectSamples() 348 # removes a whole data chunk the uniquechunks values will be invalid. 349 # Same applies to labels of course. 350 if not labels is None or not chunks is None: 351 # for a speed up to don't go through all uniqueattributes 352 # when no need 353 lcl_dsattr['__uniquereseted'] = False 354 self._resetallunique(force=True)
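For orientation, here is a minimal usage sketch of the first construction mode described in the docstring above. It assumes a working PyMVPA installation providing this module; the sample values, labels, and chunk assignments are made up for illustration:

    import numpy as N
    from mvpa.datasets.base import Dataset

    # 6 samples x 4 features, literal labels, two chunks
    samples = N.random.normal(size=(6, 4))
    ds = Dataset(samples=samples,
                 labels=['rest', 'task', 'rest', 'task', 'rest', 'task'],
                 chunks=[0, 0, 0, 1, 1, 1],
                 labels_map=True)       # map literal labels to numeric ones

    print ds.nsamples, ds.nfeatures     # -> 6 4
    print ds.uniquelabels               # numeric labels after mapping
    print ds.labels_map                 # -> {'rest': 0, 'task': 1}

The second (copy constructor) mode is used internally via the `data`/`dsattr` dictionaries and is not normally called by users directly.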
 355 
 356 
 357      __doc__ = enhancedDocString('Dataset', locals())
 358 
 359 
 360      @property
361 - def idhash(self):
362 """To verify if dataset is in the same state as when smth else was done 363 364 Like if classifier was trained on the same dataset as in question""" 365 366 _data = self._data 367 res = idhash_(_data) 368 369 # we cannot count on the order the values in the dict will show up 370 # with `self._data.value()` and since idhash will be order-dependent 371 # we have to make it deterministic 372 keys = _data.keys() 373 keys.sort() 374 for k in keys: 375 res += idhash_(_data[k]) 376 return res
377 378
379 - def _resetallunique(self, force=False):
380 """Set to None all unique* attributes of corresponding dictionary 381 """ 382 _dsattr = self._dsattr 383 384 if not force and _dsattr['__uniquereseted']: 385 return 386 387 _uniqueattributes = self._uniqueattributes 388 389 if __debug__ and "DS_" in debug.active: 390 debug("DS_", "Reseting all attributes %s for dataset %s" 391 % (_uniqueattributes, 392 self.summary(uniq=False, idhash=False, 393 stats=False, lstats=False))) 394 395 # I guess we better checked if dictname is known but... 396 for k in _uniqueattributes: 397 _dsattr[k] = None 398 _dsattr['__uniquereseted'] = True
399 400
401 - def _getuniqueattr(self, attrib, dict_):
402 """Provide common facility to return unique attributes 403 404 XXX `dict_` can be simply replaced now with self._dsattr 405 """ 406 407 # local bindings 408 _dsattr = self._dsattr 409 410 if not _dsattr.has_key(attrib) or _dsattr[attrib] is None: 411 if __debug__ and 'DS_' in debug.active: 412 debug("DS_", "Recomputing unique set for attrib %s within %s" % 413 (attrib, self.summary(uniq=False, 414 stats=False, lstats=False))) 415 # uff... might come up with better strategy to keep relevant 416 # attribute name 417 _dsattr[attrib] = N.unique( N.asanyarray(dict_[attrib[6:]]) ) 418 assert(not _dsattr[attrib] is None) 419 _dsattr['__uniquereseted'] = False 420 421 return _dsattr[attrib]
422 423
424 - def _setdataattr(self, attrib, value):
425 """Provide common facility to set attributes 426 427 """ 428 if len(value) != self.nsamples: 429 raise ValueError, \ 430 "Provided %s have %d entries while there is %d samples" % \ 431 (attrib, len(value), self.nsamples) 432 self._data[attrib] = N.asarray(value) 433 uniqueattr = "unique" + attrib 434 435 _dsattr = self._dsattr 436 if _dsattr.has_key(uniqueattr): 437 _dsattr[uniqueattr] = None
438 439
440 - def _getNSamplesPerAttr( self, attrib='labels' ):
441 """Returns the number of samples per unique label. 442 """ 443 # local bindings 444 _data = self._data 445 446 # XXX hardcoded dict_=self._data.... might be in self._dsattr 447 uniqueattr = self._getuniqueattr(attrib="unique" + attrib, 448 dict_=_data) 449 450 # use dictionary to cope with arbitrary labels 451 result = dict(zip(uniqueattr, [ 0 ] * len(uniqueattr))) 452 for l in _data[attrib]: 453 result[l] += 1 454 455 # XXX only return values to mimic the old interface but we might want 456 # to return the full dict instead 457 # return result 458 return result
459 460
461 - def _getSampleIdsByAttr(self, values, attrib="labels", 462 sort=True):
  463          """Return indices of samples given a list of attributes
  464          """
  465  
  466          if not operator.isSequenceType(values) \
  467                 or isinstance(values, basestring):
  468              values = [ values ]
  469  
  470          # TODO: compare to plain for loop through the labels
  471          #       on a real data example
  472          sel = N.array([], dtype=N.int16)
  473          _data = self._data
  474          for value in values:
  475              sel = N.concatenate((
  476                  sel, N.where(_data[attrib]==value)[0]))
  477  
  478          if sort:
  479              # place samples in the right order
  480              sel.sort()
  481  
  482          return sel
483 484
485 - def idsonboundaries(self, prior=0, post=0, 486 attributes_to_track=['labels', 'chunks'], 487 affected_labels=None, 488 revert=False):
489 """Find samples which are on the boundaries of the blocks 490 491 Such samples might need to be removed. By default (with 492 prior=0, post=0) ids of the first samples in a 'block' are 493 reported 494 495 :Parameters: 496 prior : int 497 how many samples prior to transition sample to include 498 post : int 499 how many samples post the transition sample to include 500 attributes_to_track : list of basestring 501 which attributes to track to decide on the boundary condition 502 affected_labels : list of basestring 503 for which labels to perform selection. If None - for all 504 revert : bool 505 either to revert the meaning and provide ids of samples which are found 506 to not to be boundary samples 507 """ 508 # local bindings 509 _data = self._data 510 labels = self.labels 511 nsamples = self.nsamples 512 513 lastseen = none = [None for attr in attributes_to_track] 514 transitions = [] 515 516 for i in xrange(nsamples+1): 517 if i < nsamples: 518 current = [_data[attr][i] for attr in attributes_to_track] 519 else: 520 current = none 521 if lastseen != current: 522 # transition point 523 new_transitions = range(max(0, i-prior), 524 min(nsamples-1, i+post)+1) 525 if affected_labels is not None: 526 new_transitions = [labels[i] for i in new_transitions 527 if i in affected_labels] 528 transitions += new_transitions 529 lastseen = current 530 531 transitions = Set(transitions) 532 if revert: 533 transitions = Set(range(nsamples)).difference(transitions) 534 535 # postprocess 536 transitions = N.array(list(transitions)) 537 transitions.sort() 538 return list(transitions)
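A sketch of how this boundary detection might be used, e.g. to discard samples sitting at chunk transitions (the toy dataset and the prior/post values are illustrative only):

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.random.normal(size=(12, 2)),
                 labels=[0] * 6 + [1] * 6,
                 chunks=[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])

    # ids of samples at label/chunk transitions, including one sample
    # before and after each transition point
    noisy_ids = ds.idsonboundaries(prior=1, post=1)
    # complementary set: samples safely inside the blocks
    clean_ids = ds.idsonboundaries(prior=1, post=1, revert=True)
    ds_clean = ds.selectSamples(clean_ids)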
539 540
541 - def _shapeSamples(self, samples, dtype, copy):
542 """Adapt different kinds of samples 543 544 Handle all possible input value for 'samples' and tranform 545 them into a 2d (samples x feature) representation. 546 """ 547 # put samples array into correct shape 548 # 1d arrays or simple sequences are assumed to be a single pattern 549 if (not isinstance(samples, N.ndarray)): 550 # it is safe to provide dtype which defaults to None, 551 # when N would choose appropriate dtype automagically 552 samples = N.array(samples, ndmin=2, dtype=dtype, copy=copy) 553 else: 554 if samples.ndim < 2 \ 555 or (not dtype is None and dtype != samples.dtype): 556 if dtype is None: 557 dtype = samples.dtype 558 samples = N.array(samples, ndmin=2, dtype=dtype, copy=copy) 559 elif copy: 560 samples = samples.copy() 561 562 # only samples x features matrices are supported 563 if len(samples.shape) > 2: 564 raise DatasetError, "Only (samples x features) -> 2d sample " \ 565 + "are supported (got %s shape of samples)." \ 566 % (`samples.shape`) \ 567 +" Consider MappedDataset if applicable." 568 569 return samples
570 571
572 - def _checkData(self):
573 """Checks `_data` members to have the same # of samples. 574 """ 575 # 576 # XXX: Maybe just run this under __debug__ and remove the `check_data` 577 # from the constructor, which is too complicated anyway? 578 # 579 580 # local bindings 581 nsamples = self.nsamples 582 _data = self._data 583 584 for k, v in _data.iteritems(): 585 if not len(v) == nsamples: 586 raise DatasetError, \ 587 "Length of sample attribute '%s' [%i] does not " \ 588 "match the number of samples in the dataset [%i]." \ 589 % (k, len(v), nsamples) 590 591 # check for unique origids 592 uniques = N.unique(_data['origids']) 593 uniques.sort() 594 # need to copy to prevent sorting the original array 595 sorted_ids = _data['origids'].copy() 596 sorted_ids.sort() 597 598 if not (uniques == sorted_ids).all(): 599 raise DatasetError, "Samples IDs are not unique." 600 601 # Check if labels as not literal 602 if N.asanyarray(_data['labels'].dtype.char == 'S'): 603 warning('Labels for dataset %s are literal, should be numeric. ' 604 'You might like to use labels_map argument.' % self)
605
606 - def _expandSampleAttribute(self, attr, attr_name):
607 """If a sample attribute is given as a scalar expand/repeat it to a 608 length matching the number of samples in the dataset. 609 """ 610 try: 611 # if we are initializing with a single string -- we should 612 # treat it as a single label 613 if isinstance(attr, basestring): 614 raise TypeError 615 if len(attr) != self.nsamples: 616 raise DatasetError, \ 617 "Length of sample attribute '%s' [%d]" \ 618 % (attr_name, len(attr)) \ 619 + " has to match the number of samples" \ 620 + " [%d]." % self.nsamples 621 # store the sequence as array 622 return N.array(attr) 623 624 except TypeError: 625 # make sequence of identical value matching the number of 626 # samples 627 return N.repeat(attr, self.nsamples)
628 629 630 @classmethod
631 - def _registerAttribute(cls, key, dictname="_data", abbr=None, hasunique=False):
632 """Register an attribute for any Dataset class. 633 634 Creates property assigning getters/setters depending on the 635 availability of corresponding _get, _set functions. 636 """ 637 classdict = cls.__dict__ 638 if not classdict.has_key(key): 639 if __debug__: 640 debug("DS", "Registering new attribute %s" % key) 641 # define get function and use corresponding 642 # _getATTR if such defined 643 getter = '_get%s' % key 644 if classdict.has_key(getter): 645 getter = '%s.%s' % (cls.__name__, getter) 646 else: 647 getter = "lambda x: x.%s['%s']" % (dictname, key) 648 649 # define set function and use corresponding 650 # _setATTR if such defined 651 setter = '_set%s' % key 652 if classdict.has_key(setter): 653 setter = '%s.%s' % (cls.__name__, setter) 654 elif dictname=="_data": 655 setter = "lambda self,x: self._setdataattr" + \ 656 "(attrib='%s', value=x)" % (key) 657 else: 658 setter = None 659 660 if __debug__: 661 debug("DS", "Registering new property %s.%s" % 662 (cls.__name__, key)) 663 exec "%s.%s = property(fget=%s, fset=%s)" % \ 664 (cls.__name__, key, getter, setter) 665 666 if abbr is not None: 667 exec "%s.%s = property(fget=%s, fset=%s)" % \ 668 (cls.__name__, abbr, getter, setter) 669 670 if hasunique: 671 uniquekey = "unique%s" % key 672 getter = '_get%s' % uniquekey 673 if classdict.has_key(getter): 674 getter = '%s.%s' % (cls.__name__, getter) 675 else: 676 getter = "lambda x: x._getuniqueattr" + \ 677 "(attrib='%s', dict_=x.%s)" % (uniquekey, dictname) 678 679 if __debug__: 680 debug("DS", "Registering new property %s.%s" % 681 (cls.__name__, uniquekey)) 682 683 exec "%s.%s = property(fget=%s)" % \ 684 (cls.__name__, uniquekey, getter) 685 if abbr is not None: 686 exec "%s.U%s = property(fget=%s)" % \ 687 (cls.__name__, abbr, getter) 688 689 # create samplesper<ATTR> properties 690 sampleskey = "samplesper%s" % key[:-1] # remove ending 's' XXX 691 if __debug__: 692 debug("DS", "Registering new property %s.%s" % 693 (cls.__name__, sampleskey)) 694 695 exec "%s.%s = property(fget=%s)" % \ 696 (cls.__name__, sampleskey, 697 "lambda x: x._getNSamplesPerAttr(attrib='%s')" % key) 698 699 cls._uniqueattributes.append(uniquekey) 700 701 # create idsby<ATTR> properties 702 sampleskey = "idsby%s" % key # remove ending 's' XXX 703 if __debug__: 704 debug("DS", "Registering new property %s.%s" % 705 (cls.__name__, sampleskey)) 706 707 exec "%s.%s = %s" % (cls.__name__, sampleskey, 708 "lambda self, x: " + 709 "self._getSampleIdsByAttr(x,attrib='%s')" % key) 710 711 cls._uniqueattributes.append(uniquekey) 712 713 cls._registeredattributes.append(key) 714 elif __debug__: 715 warning('Trying to reregister attribute `%s`. For now ' % key + 716 'such capability is not present')
717 718
719 - def __str__(self):
720 """String summary over the object 721 """ 722 return self.summary(uniq=True, 723 idhash=__debug__ and ('DS_ID' in debug.active), 724 stats=__debug__ and ('DS_STATS' in debug.active), 725 lstats=__debug__ and ('DS_STATS' in debug.active), 726 )
727 728
729 - def __repr__(self):
730 return "<%s>" % str(self)
731 732
733 - def summary(self, uniq=True, stats=True, idhash=False, lstats=True, 734 maxc=30, maxl=20):
735 """String summary over the object 736 737 :Parameters: 738 uniq : bool 739 Include summary over data attributes which have unique 740 idhash : bool 741 Include idhash value for dataset and samples 742 stats : bool 743 Include some basic statistics (mean, std, var) over dataset samples 744 lstats : bool 745 Include statistics on chunks/labels 746 maxc : int 747 Maximal number of chunks when provide details on labels/chunks 748 maxl : int 749 Maximal number of labels when provide details on labels/chunks 750 """ 751 # local bindings 752 samples = self.samples 753 _data = self._data 754 _dsattr = self._dsattr 755 756 if idhash: 757 idhash_ds = "{%s}" % self.idhash 758 idhash_samples = "{%s}" % idhash_(samples) 759 else: 760 idhash_ds = "" 761 idhash_samples = "" 762 763 s = """Dataset %s/ %s %d%s x %d""" % \ 764 (idhash_ds, samples.dtype, 765 self.nsamples, idhash_samples, self.nfeatures) 766 767 ssep = (' ', '\n')[lstats] 768 if uniq: 769 s += "%suniq:" % ssep 770 for uattr in _dsattr.keys(): 771 if not uattr.startswith("unique"): 772 continue 773 attr = uattr[6:] 774 try: 775 value = self._getuniqueattr(attrib=uattr, 776 dict_=_data) 777 s += " %d %s" % (len(value), attr) 778 except: 779 pass 780 781 if isinstance(self.labels_map, dict): 782 s += ' labels_mapped' 783 784 if stats: 785 # TODO -- avg per chunk? 786 # XXX We might like to use scipy.stats.describe to get 787 # quick summary statistics (mean/range/skewness/kurtosis) 788 s += "%sstats: mean=%g std=%g var=%g min=%g max=%g\n" % \ 789 (ssep, N.mean(samples), N.std(samples), 790 N.var(samples), N.min(samples), N.max(samples)) 791 792 if lstats: 793 s += self.summary_labels(maxc=maxc, maxl=maxl) 794 795 return s
796 797
798 - def summary_labels(self, maxc=30, maxl=20):
799 """Provide summary statistics over the labels and chunks 800 801 :Parameters: 802 maxc : int 803 Maximal number of chunks when provide details 804 maxl : int 805 Maximal number of labels when provide details 806 """ 807 # We better avoid bound function since if people only 808 # imported Dataset without miscfx it would fail 809 from mvpa.datasets.miscfx import getSamplesPerChunkLabel 810 spcl = getSamplesPerChunkLabel(self) 811 # XXX couldn't they be unordered? 812 ul = self.uniquelabels.tolist() 813 uc = self.uniquechunks.tolist() 814 s = "" 815 if len(ul) < maxl and len(uc) < maxc: 816 s += "\nCounts of labels in each chunk:" 817 # only in a resonable case do printing 818 table = [[' chunks\labels'] + ul] 819 table += [[''] + ['---'] * len(ul)] 820 for c, counts in zip(uc, spcl): 821 table.append([ str(c) ] + counts.tolist()) 822 s += '\n' + table2string(table) 823 else: 824 s += "No details due to large number of labels or chunks. " \ 825 "Increase maxc and maxl if desired" 826 827 labels_map = self.labels_map 828 if isinstance(labels_map, dict): 829 s += "\nOriginal labels were mapped using following mapping:" 830 s += '\n\t'+'\n\t'.join([':\t'.join(map(str, x)) 831 for x in labels_map.items()]) + '\n' 832 833 def cl_stats(axis, u, name1, name2): 834 """ Compute statistics per label 835 """ 836 stats = {'min': N.min(spcl, axis=axis), 837 'max': N.max(spcl, axis=axis), 838 'mean': N.mean(spcl, axis=axis), 839 'std': N.std(spcl, axis=axis), 840 '#%ss' % name2: N.sum(spcl>0, axis=axis)} 841 entries = [' ' + name1, 'mean', 'std', 'min', 'max', '#%ss' % name2] 842 table = [ entries ] 843 for i, l in enumerate(u): 844 d = {' ' + name1 : l} 845 d.update(dict([ (k, stats[k][i]) for k in stats.keys()])) 846 table.append( [ ('%.3g', '%s')[isinstance(d[e], basestring)] 847 % d[e] for e in entries] ) 848 return '\nSummary per %s across %ss\n' % (name1, name2) \ 849 + table2string(table)
850 851 if len(ul) < maxl: 852 s += cl_stats(0, ul, 'label', 'chunk') 853 if len(uc) < maxc: 854 s += cl_stats(1, uc, 'chunk', 'label') 855 return s
856 857
858 - def __iadd__(self, other):
859 """Merge the samples of one Dataset object to another (in-place). 860 861 No dataset attributes, besides labels_map, will be merged! 862 Additionally, a new set of unique `origids` will be generated. 863 """ 864 # local bindings 865 _data = self._data 866 other_data = other._data 867 868 if not self.nfeatures == other.nfeatures: 869 raise DatasetError, "Cannot add Dataset, because the number of " \ 870 "feature do not match." 871 872 # take care about labels_map and labels 873 slm = self.labels_map 874 olm = other.labels_map 875 if N.logical_xor(slm is None, olm is None): 876 raise ValueError, "Cannot add datasets where only one of them " \ 877 "has labels map assigned. If needed -- implement it" 878 879 # concatenate all sample attributes 880 for k,v in _data.iteritems(): 881 if k == 'origids': 882 # special case samples origids: for now just regenerate unique 883 # ones could also check if concatenation is unique, but it 884 # would be costly performance-wise 885 _data[k] = N.arange(len(v) + len(other_data[k])) 886 887 elif k == 'labels' and slm is not None: 888 # special care about labels if mapping was in effect, 889 # we need to append 2nd map to the first one and 890 # relabel 2nd dataset 891 nlm = slm.copy() 892 # figure out maximal numerical label used now 893 nextid = N.sort(nlm.values())[-1] + 1 894 olabels = other.labels 895 olabels_remap = {} 896 for ol, olnum in olm.iteritems(): 897 if not nlm.has_key(ol): 898 # check if we can preserve old numberic label 899 # if not -- assign some new one not yet present 900 # in any dataset 901 if olnum in nlm.values(): 902 nextid = N.sort(nlm.values() + olm.values())[-1] + 1 903 else: 904 nextid = olnum 905 olabels_remap[olnum] = nextid 906 nlm[ol] = nextid 907 nextid += 1 908 else: 909 olabels_remap[olnum] = nlm[ol] 910 olabels = [olabels_remap[x] for x in olabels] 911 # finally compose new labels 912 _data['labels'] = N.concatenate((v, olabels), axis=0) 913 # and reassign new mapping 914 self._dsattr['labels_map'] = nlm 915 916 if __debug__: 917 # check if we are not dealing with colliding 918 # mapping, since it is problematic and might lead 919 # to various complications 920 if (len(Set(slm.keys())) != len(Set(slm.values()))) or \ 921 (len(Set(olm.keys())) != len(Set(olm.values()))): 922 warning("Adding datasets where multiple labels " 923 "mapped to the same ID is not recommended. " 924 "Please check the outcome. Original mappings " 925 "were %s and %s. Resultant is %s" 926 % (slm, olm, nlm)) 927 928 else: 929 _data[k] = N.concatenate((v, other_data[k]), axis=0) 930 931 # might be more sophisticated but for now just reset -- it is safer ;) 932 self._resetallunique() 933 934 return self
935 936
937 - def __add__( self, other ):
938 """Merge the samples two Dataset objects. 939 940 All data of both datasets is copied, concatenated and a new Dataset is 941 returned. 942 943 NOTE: This can be a costly operation (both memory and time). If 944 performance is important consider the '+=' operator. 945 """ 946 # create a new object of the same type it is now and NOT only Dataset 947 out = super(Dataset, self).__new__(self.__class__) 948 949 # now init it: to make it work all Dataset contructors have to accept 950 # Class(data=Dict, dsattr=Dict) 951 out.__init__(data=self._data, 952 dsattr=self._dsattr, 953 copy_samples=True, 954 copy_data=True, 955 copy_dsattr=True) 956 957 out += other 958 959 return out
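A short sketch of merging datasets with '+' and '+=' as described above (feature counts must match; values are illustrative):

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds1 = Dataset(samples=N.zeros((4, 3)), labels=[0, 0, 1, 1], chunks=0)
    ds2 = Dataset(samples=N.ones((4, 3)), labels=[0, 1, 1, 1], chunks=1)

    merged = ds1 + ds2           # copies both datasets into a new one
    print merged.nsamples        # -> 8

    ds1 += ds2                   # cheaper in-place merge, modifies ds1
    print ds1.samplesperlabel    # counts of samples per label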
960 961
962 - def copy(self, deep=True):
963 """Create a copy (clone) of the dataset, by fully copying current one 964 965 :Keywords: 966 deep : bool 967 deep flag is provided to __init__ for 968 copy_{samples,data,dsattr}. By default full copy is done. 969 """ 970 # create a new object of the same type it is now and NOT only Dataset 971 out = super(Dataset, self).__new__(self.__class__) 972 973 # now init it: to make it work all Dataset contructors have to accept 974 # Class(data=Dict, dsattr=Dict) 975 out.__init__(data=self._data, 976 dsattr=self._dsattr, 977 copy_samples=True, 978 copy_data=True, 979 copy_dsattr=True) 980 981 return out
982 983
984 - def selectFeatures(self, ids=None, sort=True, groups=None):
985 """Select a number of features from the current set. 986 987 :Parameters: 988 ids 989 iterable container to select ids 990 sort : bool 991 if to sort Ids. Order matters and `selectFeatures` assumes 992 incremental order. If not such, in non-optimized code 993 selectFeatures would verify the order and sort 994 995 Returns a new Dataset object with a copy of corresponding features 996 from the original samples array. 997 998 WARNING: The order of ids determines the order of features in 999 the returned dataset. This might be useful sometimes, but can 1000 also cause major headaches! Order would is verified when 1001 running in non-optimized code (if __debug__) 1002 """ 1003 if ids is None and groups is None: 1004 raise ValueError, "No feature selection specified." 1005 1006 # start with empty list if no ids where specified (so just groups) 1007 if ids is None: 1008 ids = [] 1009 1010 if not groups is None: 1011 if not self._dsattr.has_key('featuregroups'): 1012 raise RuntimeError, \ 1013 "Dataset has no feature grouping information." 1014 1015 for g in groups: 1016 ids += (self._dsattr['featuregroups'] == g).nonzero()[0].tolist() 1017 1018 # XXX set sort default to True, now sorting has to be explicitely 1019 # disabled and warning is not necessary anymore 1020 if sort: 1021 ids = copy.deepcopy(ids) 1022 ids.sort() 1023 elif __debug__ and 'CHECK_DS_SORTED' in debug.active: 1024 from mvpa.misc.support import isSorted 1025 if not isSorted(ids): 1026 warning("IDs for selectFeatures must be provided " + 1027 "in sorted order, otherwise major headache might occur") 1028 1029 # shallow-copy all stuff from current data dict 1030 new_data = self._data.copy() 1031 1032 # assign the selected features -- data is still shared with 1033 # current dataset 1034 new_data['samples'] = self._data['samples'][:, ids] 1035 1036 # apply selection to feature groups as well 1037 if self._dsattr.has_key('featuregroups'): 1038 new_dsattr = self._dsattr.copy() 1039 new_dsattr['featuregroups'] = self._dsattr['featuregroups'][ids] 1040 else: 1041 new_dsattr = self._dsattr 1042 1043 # create a new object of the same type it is now and NOT only Dataset 1044 dataset = super(Dataset, self).__new__(self.__class__) 1045 1046 # now init it: to make it work all Dataset contructors have to accept 1047 # Class(data=Dict, dsattr=Dict) 1048 dataset.__init__(data=new_data, 1049 dsattr=new_dsattr, 1050 check_data=False, 1051 copy_samples=False, 1052 copy_data=False, 1053 copy_dsattr=False 1054 ) 1055 1056 return dataset
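A sketch of feature selection as described above (the feature indices are arbitrary examples):

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.random.normal(size=(5, 10)),
                 labels=[0, 1, 0, 1, 0])

    ds_small = ds.selectFeatures([0, 3, 7])   # keep three features
    print ds_small.nfeatures                  # -> 3
    # sample attributes are shared with the original dataset where
    # possible, so this is a comparatively cheap operation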
1057 1058
1059 - def applyMapper(self, featuresmapper=None, samplesmapper=None, 1060 train=True):
1061 """Obtain new dataset by applying mappers over features and/or samples. 1062 1063 While featuresmappers leave the sample attributes information 1064 unchanged, as the number of samples in the dataset is invariant, 1065 samplesmappers are also applied to the samples attributes themselves! 1066 1067 Applying a featuresmapper will destroy any feature grouping information. 1068 1069 :Parameters: 1070 featuresmapper : Mapper 1071 `Mapper` to somehow transform each sample's features 1072 samplesmapper : Mapper 1073 `Mapper` to transform each feature across samples 1074 train : bool 1075 Flag whether to train the mapper with this dataset before applying 1076 it. 1077 1078 TODO: selectFeatures is pretty much 1079 applyMapper(featuresmapper=MaskMapper(...)) 1080 """ 1081 1082 # shallow-copy all stuff from current data dict 1083 new_data = self._data.copy() 1084 1085 # apply mappers 1086 1087 if samplesmapper: 1088 if __debug__: 1089 debug("DS", "Training samplesmapper %s" % `samplesmapper`) 1090 samplesmapper.train(self) 1091 1092 if __debug__: 1093 debug("DS", "Applying samplesmapper %s" % `samplesmapper` + 1094 " to samples of dataset `%s`" % `self`) 1095 1096 # get rid of existing 'origids' as they are not valid anymore and 1097 # applying a mapper to them is not really meaningful 1098 if new_data.has_key('origids'): 1099 del(new_data['origids']) 1100 1101 # apply mapper to all sample-wise data in dataset 1102 for k in new_data.keys(): 1103 new_data[k] = samplesmapper.forward(self._data[k]) 1104 1105 # feature mapping might affect dataset attributes 1106 # XXX: might be obsolete when proper feature attributes are implemented 1107 new_dsattr = self._dsattr 1108 1109 if featuresmapper: 1110 if __debug__: 1111 debug("DS", "Training featuresmapper %s" % `featuresmapper`) 1112 featuresmapper.train(self) 1113 1114 if __debug__: 1115 debug("DS", "Applying featuresmapper %s" % `featuresmapper` + 1116 " to samples of dataset `%s`" % `self`) 1117 new_data['samples'] = featuresmapper.forward(self._data['samples']) 1118 1119 # remove feature grouping, who knows what the mapper did to the 1120 # features 1121 if self._dsattr.has_key('featuregroups'): 1122 new_dsattr = self._dsattr.copy() 1123 del(new_dsattr['featuregroups']) 1124 else: 1125 new_dsattr = self._dsattr 1126 1127 # create a new object of the same type it is now and NOT only Dataset 1128 dataset = super(Dataset, self).__new__(self.__class__) 1129 1130 # now init it: to make it work all Dataset contructors have to accept 1131 # Class(data=Dict, dsattr=Dict) 1132 dataset.__init__(data=new_data, 1133 dsattr=new_dsattr, 1134 check_data=False, 1135 copy_samples=False, 1136 copy_data=False, 1137 copy_dsattr=False 1138 ) 1139 1140 # samples attributes might have changed after applying samplesmapper 1141 if samplesmapper: 1142 dataset._resetallunique(force=True) 1143 1144 return dataset
1145 1146
1147 - def selectSamples(self, ids):
1148 """Choose a subset of samples defined by samples IDs. 1149 1150 Returns a new dataset object containing the selected sample 1151 subset. 1152 1153 TODO: yoh, we might need to sort the mask if the mask is a 1154 list of ids and is not ordered. Clarify with Michael what is 1155 our intent here! 1156 """ 1157 # without having a sequence a index the masked sample array would 1158 # loose its 2d layout 1159 if not operator.isSequenceType( ids ): 1160 ids = [ids] 1161 # TODO: Reconsider crafting a slice if it can be done to don't copy 1162 # the data 1163 #try: 1164 # minmask = min(mask) 1165 # maxmask = max(mask) 1166 #except: 1167 # minmask = min(map(int,mask)) 1168 # maxmask = max(map(int,mask)) 1169 # lets see if we could get it done with cheap view/slice 1170 #(minmask, maxmask) != (0, 1) and \ 1171 #if len(mask) > 2 and \ 1172 # N.array([N.arange(minmask, maxmask+1) == N.array(mask)]).all(): 1173 # slice_ = slice(minmask, maxmask+1) 1174 # if __debug__: 1175 # debug("DS", "We can and do convert mask %s into splice %s" % 1176 # (mask, slice_)) 1177 # mask = slice_ 1178 # mask all sample attributes 1179 data = {} 1180 for k, v in self._data.iteritems(): 1181 data[k] = v[ids, ] 1182 1183 # create a new object of the same type it is now and NOT onyl Dataset 1184 dataset = super(Dataset, self).__new__(self.__class__) 1185 1186 # now init it: to make it work all Dataset contructors have to accept 1187 # Class(data=Dict, dsattr=Dict) 1188 dataset.__init__(data=data, 1189 dsattr=self._dsattr, 1190 check_data=False, 1191 copy_samples=False, 1192 copy_data=False, 1193 copy_dsattr=False) 1194 1195 dataset._resetallunique(force=True) 1196 return dataset
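And correspondingly for sample selection, again just a sketch with made-up indices; the idsbylabels helper used below is auto-generated by _registerAttribute later in this module:

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.random.normal(size=(6, 4)),
                 labels=[0, 0, 1, 1, 2, 2], chunks=[0, 1, 0, 1, 0, 1])

    ds_sub = ds.selectSamples([0, 2, 4])      # every second sample
    # ids of all samples carrying particular labels
    ids = ds.idsbylabels([1, 2])
    ds_12 = ds.selectSamples(ids)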
1197 1198 1199
1200 - def index(self, *args, **kwargs):
1201 """Universal indexer to obtain indexes of interesting samples/features. 1202 See .select() for more information 1203 1204 :Return: tuple of (samples indexes, features indexes). Each 1205 item could be also None, if no selection on samples or 1206 features was requested (to discriminate between no selected 1207 items, and no selections) 1208 """ 1209 s_indx = [] # selections for samples 1210 f_indx = [] # selections for features 1211 return_dataset = kwargs.pop('return_dataset', False) 1212 largs = len(args) 1213 1214 args = list(args) # so we could override 1215 # Figure out number of positional 1216 largs_nonstring = 0 1217 # need to go with index since we might need to override internally 1218 for i in xrange(largs): 1219 l = args[i] 1220 if isinstance(l, basestring): 1221 if l.lower() == 'all': 1222 # override with a slice 1223 args[i] = slice(None) 1224 else: 1225 break 1226 largs_nonstring += 1 1227 1228 if largs_nonstring >= 1: 1229 s_indx.append(args[0]) 1230 if __debug__ and 'CHECK_DS_SELECT' in debug.active: 1231 _validate_indexes_uniq_sorted(args[0], 'select', 'samples') 1232 if largs_nonstring == 2: 1233 f_indx.append(args[1]) 1234 if __debug__ and 'CHECK_DS_SELECT' in debug.active: 1235 _validate_indexes_uniq_sorted(args[1], 'select', 'features') 1236 elif largs_nonstring > 2: 1237 raise ValueError, "Only two positional arguments are allowed" \ 1238 ". 1st for samples, 2nd for features" 1239 1240 # process left positional arguments which must encode selections like 1241 # ('labels', [1,2,3]) 1242 1243 if (largs - largs_nonstring) % 2 != 0: 1244 raise ValueError, "Positional selections must come in pairs:" \ 1245 " e.g. ('labels', [1,2,3])" 1246 1247 for i in xrange(largs_nonstring, largs, 2): 1248 k, v = args[i:i+2] 1249 kwargs[k] = v 1250 1251 # process keyword parameters 1252 data_ = self._data 1253 for k, v in kwargs.iteritems(): 1254 if k == 'samples': 1255 s_indx.append(v) 1256 elif k == 'features': 1257 f_indx.append(v) 1258 elif data_.has_key(k): 1259 # so it is an attribute for samples 1260 # XXX may be do it not only if __debug__ 1261 if __debug__: # and 'CHECK_DS_SELECT' in debug.active: 1262 if not N.any([isinstance(v, cls) for cls in 1263 [list, tuple, slice, int]]): 1264 raise ValueError, "Trying to specify selection for %s " \ 1265 "based on unsupported '%s'" % (k, v) 1266 s_indx.append(self._getSampleIdsByAttr(v, attrib=k, sort=False)) 1267 else: 1268 raise ValueError, 'Keyword "%s" is not known, thus' \ 1269 'select() failed' % k 1270 1271 def combine_indexes(indx, nelements): 1272 """Helper function: intersect selections given in indx 1273 1274 :Parameters: 1275 indxs : list of lists or slices 1276 selections of elements 1277 nelements : int 1278 number of elements total for deriving indexes from slices 1279 """ 1280 indx_sel = None # pure list of ids for selection 1281 for s in indx: 1282 if isinstance(s, slice) or \ 1283 isinstance(s, N.ndarray) and s.dtype==bool: 1284 # XXX there might be a better way than reconstructing the full 1285 # index list. 
Also we are loosing ability to do simlpe slicing, 1286 # ie w.o making a copy of the selected data 1287 all_indexes = N.arange(nelements) 1288 s = all_indexes[s] 1289 elif not operator.isSequenceType(s): 1290 s = [ s ] 1291 1292 if indx_sel is None: 1293 indx_sel = Set(s) 1294 else: 1295 # To be consistent 1296 #if not isinstance(indx_sel, Set): 1297 # indx_sel = Set(indx_sel) 1298 indx_sel = indx_sel.intersection(s) 1299 1300 # if we got Set -- convert 1301 if isinstance(indx_sel, Set): 1302 indx_sel = list(indx_sel) 1303 1304 # sort for the sake of sanity 1305 indx_sel.sort() 1306 1307 return indx_sel
1308 1309 # Select samples 1310 if len(s_indx) == 1 and isinstance(s_indx[0], slice) \ 1311 and s_indx[0] == slice(None): 1312 # so no actual selection -- full slice 1313 s_indx = s_indx[0] 1314 else: 1315 # else - get indexes 1316 if len(s_indx) == 0: 1317 s_indx = None 1318 else: 1319 s_indx = combine_indexes(s_indx, self.nsamples) 1320 1321 # Select features 1322 if len(f_indx): 1323 f_indx = combine_indexes(f_indx, self.nfeatures) 1324 else: 1325 f_indx = None 1326 1327 return s_indx, f_indx 1328 1329
1330 - def select(self, *args, **kwargs):
1331 """Universal selector 1332 1333 WARNING: if you need to select duplicate samples 1334 (e.g. samples=[5,5]) or order of selected samples of features 1335 is important and has to be not ordered (e.g. samples=[3,2,1]), 1336 please use selectFeatures or selectSamples functions directly 1337 1338 Examples: 1339 Mimique plain selectSamples:: 1340 1341 dataset.select([1,2,3]) 1342 dataset[[1,2,3]] 1343 1344 Mimique plain selectFeatures:: 1345 1346 dataset.select(slice(None), [1,2,3]) 1347 dataset.select('all', [1,2,3]) 1348 dataset[:, [1,2,3]] 1349 1350 Mixed (select features and samples):: 1351 1352 dataset.select([1,2,3], [1, 2]) 1353 dataset[[1,2,3], [1, 2]] 1354 1355 Select samples matching some attributes:: 1356 1357 dataset.select(labels=[1,2], chunks=[2,4]) 1358 dataset.select('labels', [1,2], 'chunks', [2,4]) 1359 dataset['labels', [1,2], 'chunks', [2,4]] 1360 1361 Mixed -- out of first 100 samples, select only those with 1362 labels 1 or 2 and belonging to chunks 2 or 4, and select 1363 features 2 and 3:: 1364 1365 dataset.select(slice(0,100), [2,3], labels=[1,2], chunks=[2,4]) 1366 dataset[:100, [2,3], 'labels', [1,2], 'chunks', [2,4]] 1367 1368 """ 1369 s_indx, f_indx = self.index(*args, **kwargs) 1370 1371 # Select samples 1372 if s_indx == slice(None): 1373 # so no actual selection was requested among samples. 1374 # thus proceed with original dataset 1375 if __debug__: 1376 debug('DS', 'in select() not selecting samples') 1377 ds = self 1378 else: 1379 # else do selection 1380 if __debug__: 1381 debug('DS', 'in select() selecting samples given selections' 1382 + str(s_indx)) 1383 ds = self.selectSamples(s_indx) 1384 1385 # Select features 1386 if f_indx is not None: 1387 if __debug__: 1388 debug('DS', 'in select() selecting features given selections' 1389 + str(f_indx)) 1390 ds = ds.selectFeatures(f_indx) 1391 1392 return ds
1393 1394 1395
1396 - def where(self, *args, **kwargs):
1397 """Obtain indexes of interesting samples/features. See select() for more information 1398 1399 XXX somewhat obsoletes idsby... 1400 """ 1401 s_indx, f_indx = self.index(*args, **kwargs) 1402 if s_indx is not None and f_indx is not None: 1403 return s_indx, f_indx 1404 elif s_indx is not None: 1405 return s_indx 1406 else: 1407 return f_indx
1408 1409
1410 - def __getitem__(self, *args):
1411 """Convinience dataset parts selection 1412 1413 See select for more information 1414 """ 1415 # for cases like ['labels', 1] 1416 if len(args) == 1 and isinstance(args[0], tuple): 1417 args = args[0] 1418 1419 args_, args = args, () 1420 for a in args_: 1421 if isinstance(a, slice) and \ 1422 isinstance(a.start, basestring): 1423 # for the constructs like ['labels':[1,2]] 1424 if a.stop is None or a.step is not None: 1425 raise ValueError, \ 1426 "Selection must look like ['chunks':[2,3]]" 1427 args += (a.start, a.stop) 1428 else: 1429 args += (a,) 1430 return self.select(*args)
1431 1432
1433 - def permuteLabels(self, status, perchunk=True, assure_permute=False):
1434 """Permute the labels. 1435 1436 TODO: rename status into something closer in semantics. 1437 1438 :Parameters: 1439 status : bool 1440 Calling this method with set to True, the labels are 1441 permuted among all samples. If 'status' is False the 1442 original labels are restored. 1443 perchunk : bool 1444 If True permutation is limited to samples sharing the same 1445 chunk value. Therefore only the association of a certain 1446 sample with a label is permuted while keeping the absolute 1447 number of occurences of each label value within a certain 1448 chunk constant. 1449 assure_permute : bool 1450 If True, assures that labels are permutted, ie any one is 1451 different from the original one 1452 """ 1453 # local bindings 1454 _data = self._data 1455 1456 if len(self.uniquelabels)<2: 1457 raise RuntimeError, \ 1458 "Call to permuteLabels is bogus since there is insuficient" \ 1459 " number of labels: %s" % self.uniquelabels 1460 1461 if not status: 1462 # restore originals 1463 if _data.get('origlabels', None) is None: 1464 raise RuntimeError, 'Cannot restore labels. ' \ 1465 'permuteLabels() has never been ' \ 1466 'called with status == True.' 1467 self.labels = _data['origlabels'] 1468 _data.pop('origlabels') 1469 else: 1470 # store orig labels, but only if not yet done, otherwise multiple 1471 # calls with status == True will destroy the original labels 1472 if not _data.has_key('origlabels') \ 1473 or _data['origlabels'] == None: 1474 # bind old labels to origlabels 1475 _data['origlabels'] = _data['labels'] 1476 # copy labels 1477 _data['labels'] = copy.copy(_data['labels']) 1478 1479 labels = _data['labels'] 1480 # now scramble 1481 if perchunk: 1482 for o in self.uniquechunks: 1483 labels[self.chunks == o] = \ 1484 N.random.permutation(labels[self.chunks == o]) 1485 else: 1486 labels = N.random.permutation(labels) 1487 1488 self.labels = labels 1489 1490 if assure_permute: 1491 if not (_data['labels'] != _data['origlabels']).any(): 1492 if not (assure_permute is True): 1493 if assure_permute == 1: 1494 raise RuntimeError, \ 1495 "Cannot assure permutation of labels %s for " \ 1496 "some reason with chunks %s and while " \ 1497 "perchunk=%s . Should not happen" % \ 1498 (self.labels, self.chunks, perchunk) 1499 else: 1500 assure_permute = 11 # make 10 attempts 1501 if __debug__: 1502 debug("DS", "Recalling permute to assure different labels") 1503 self.permuteLabels(status, perchunk=perchunk, 1504 assure_permute=assure_permute-1)
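A sketch of the typical permutation-testing pattern this method supports; the classifier training/testing that would normally go inside the loop is omitted:

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.random.normal(size=(8, 5)),
                 labels=[0, 1] * 4, chunks=[0, 0, 0, 0, 1, 1, 1, 1])

    for i in xrange(100):
        ds.permuteLabels(True, perchunk=True)   # scramble within each chunk
        # ... train/test a classifier here to build a null distribution ...
    ds.permuteLabels(False)                     # restore the original labels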
1505 1506
1507 - def getRandomSamples(self, nperlabel):
 1508          """Select a random set of samples.
 1509  
 1510          If 'nperlabel' is an integer value, the specified number of samples is
 1511          randomly chosen from the group of samples sharing a unique label
 1512          value (total number of selected samples: nperlabel x len(uniquelabels)).
 1513  
 1514          If 'nperlabel' is a list, its length has to match the number of
 1515          unique label values. In this case 'nperlabel' specifies the number of
 1516          samples that shall be selected from the samples with the corresponding
 1517          label.
 1518  
 1519          The method returns a Dataset object containing the selected
 1520          samples.
 1521          """
 1522          # if an integer is given take this value for all classes
 1523          if isinstance(nperlabel, int):
 1524              nperlabel = [ nperlabel for i in self.uniquelabels ]
 1525  
 1526          sample = []
 1527          # for each available class
 1528          labels = self.labels
 1529          for i, r in enumerate(self.uniquelabels):
 1530              # get the list of pattern ids for this class
 1531              sample += random.sample( (labels == r).nonzero()[0],
 1532                                       nperlabel[i] )
 1533  
 1534          return self.selectSamples( sample )
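For instance, to draw a balanced random subset (the sizes here are arbitrary):

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.random.normal(size=(20, 3)),
                 labels=[0] * 10 + [1] * 10)

    ds_balanced = ds.getRandomSamples(5)   # 5 random samples per label
    print ds_balanced.nsamples             # -> 10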
1535 1536 1537 # def _setchunks(self, chunks): 1538 # """Sets chunks and recomputes uniquechunks 1539 # """ 1540 # self._data['chunks'] = N.array(chunks) 1541 # self._dsattr['uniquechunks'] = None # None!since we might not need them 1542 1543
1544 - def getNSamples( self ):
1545 """Currently available number of patterns. 1546 """ 1547 return self._data['samples'].shape[0]
1548 1549
1550 - def getNFeatures( self ):
1551 """Number of features per pattern. 1552 """ 1553 return self._data['samples'].shape[1]
1554 1555
1556 - def getLabelsMap(self):
1557 """Stored labels map (if any) 1558 """ 1559 return self._dsattr.get('labels_map', None)
1560 1561
1562 - def setLabelsMap(self, lm):
1563 """Set labels map. 1564 1565 Checks for the validity of the mapping -- values should cover 1566 all existing labels in the dataset 1567 """ 1568 values = Set(lm.values()) 1569 labels = Set(self.uniquelabels) 1570 if not values.issuperset(labels): 1571 raise ValueError, \ 1572 "Provided mapping %s has some existing labels (out of %s) " \ 1573 "missing from mapping" % (list(values), list(labels)) 1574 self._dsattr['labels_map'] = lm
1575 1576
1577 - def setSamplesDType(self, dtype):
1578 """Set the data type of the samples array. 1579 """ 1580 # local bindings 1581 _data = self._data 1582 1583 if _data['samples'].dtype != dtype: 1584 _data['samples'] = _data['samples'].astype(dtype)
1585 1586
1587 - def defineFeatureGroups(self, definition):
1588 """Assign `definition` to featuregroups 1589 1590 XXX Feature-groups was not finished to be useful 1591 """ 1592 if not len(definition) == self.nfeatures: 1593 raise ValueError, \ 1594 "Length of feature group definition %i " \ 1595 "does not match the number of features %i " \ 1596 % (len(definition), self.nfeatures) 1597 1598 self._dsattr['featuregroups'] = N.array(definition)
1599 1600
1601 - def convertFeatureIds2FeatureMask(self, ids):
1602 """Returns a boolean mask with all features in `ids` selected. 1603 1604 :Parameters: 1605 ids: list or 1d array 1606 To be selected features ids. 1607 1608 :Returns: 1609 ndarray: dtype='bool' 1610 All selected features are set to True; False otherwise. 1611 """ 1612 fmask = N.repeat(False, self.nfeatures) 1613 fmask[ids] = True 1614 1615 return fmask
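For example, converting between id lists and boolean masks (values illustrative):

    import numpy as N
    from mvpa.datasets.base import Dataset

    ds = Dataset(samples=N.zeros((2, 6)), labels=[0, 1])
    mask = ds.convertFeatureIds2FeatureMask([1, 4])
    print mask                                    # -> [False True False False True False]
    print ds.convertFeatureMask2FeatureIds(mask)  # -> [1 4]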
1616 1617
1618 - def convertFeatureMask2FeatureIds(self, mask):
1619 """Returns feature ids corresponding to non-zero elements in the mask. 1620 1621 :Parameters: 1622 mask: 1d ndarray 1623 Feature mask. 1624 1625 :Returns: 1626 ndarray: integer 1627 Ids of non-zero (non-False) mask elements. 1628 """ 1629 return mask.nonzero()[0]
1630 1631 1632 @staticmethod
1633 - def _checkCopyConstructorArgs(**kwargs):
 1634          """Common sanity check for Dataset copy constructor calls."""
 1635          # check if we have samples (somewhere)
 1636          samples = None
 1637          if kwargs.has_key('samples'):
 1638              samples = kwargs['samples']
 1639          if samples is None and kwargs.has_key('data') \
 1640             and kwargs['data'].has_key('samples'):
 1641              samples = kwargs['data']['samples']
 1642          if samples is None:
 1643              raise DatasetError, \
 1644                  "`samples` must be provided to copy constructor call."
 1645  
 1646          if not len(samples.shape) == 2:
 1647              raise DatasetError, \
 1648                  "samples must be in 2D shape in copy constructor call."
1649 1650 1651 # read-only class properties 1652 nsamples = property( fget=getNSamples ) 1653 nfeatures = property( fget=getNFeatures ) 1654 labels_map = property( fget=getLabelsMap, fset=setLabelsMap ) 1655
1656 -def datasetmethod(func):
1657 """Decorator to easily bind functions to a Dataset class 1658 """ 1659 if __debug__: 1660 debug("DS_", "Binding function %s to Dataset class" % func.func_name) 1661 1662 # Bind the function 1663 setattr(Dataset, func.func_name, func) 1664 1665 # return the original one 1666 return func
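A sketch of the decorator's intended use: a hypothetical helper function (not part of PyMVPA itself) becomes available both as a plain function and as a Dataset method:

    import numpy as N
    from mvpa.datasets.base import Dataset, datasetmethod

    @datasetmethod
    def nfeatures_per_sample_ratio(dataset):
        """Hypothetical helper -- purely illustrative."""
        return float(dataset.nfeatures) / dataset.nsamples

    ds = Dataset(samples=N.zeros((4, 8)), labels=[0, 0, 1, 1])
    print nfeatures_per_sample_ratio(ds)    # -> 2.0
    print ds.nfeatures_per_sample_ratio()   # -> 2.0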
1667 1668 1669 # Following attributes adherent to the basic dataset 1670 Dataset._registerAttribute("samples", "_data", abbr='S', hasunique=False) 1671 Dataset._registerAttribute("labels", "_data", abbr='L', hasunique=True) 1672 Dataset._registerAttribute("chunks", "_data", abbr='C', hasunique=True) 1673 # samples ids (already unique by definition) 1674 Dataset._registerAttribute("origids", "_data", abbr='I', hasunique=False) 1675 1676 1677 1678 # XXX This is the place to redo the Dataset base class in a more powerful, yet 1679 # simpler way. The basic goal is to allow for all kinds of attributes: 1680 # 1681 # 1) Samples attributes (per-sample full) 1682 # 2) Features attributes (per-feature stuff) 1683 # 1684 # 3) Dataset attributes (per-dataset stuff) 1685 # 1686 # Use cases: 1687 # 1688 # 1) labels and chunks -- goal: it should be possible to have multivariate 1689 # labels, e.g. to specify targets for a neural network output layer 1690 # 1691 # 2) feature binding/grouping -- goal: easily define ROIs in datasets, or 1692 # group/mark various types of feature so they could be selected or 1693 # discarded all together 1694 # 1695 # 3) Mappers, or chains of them (this should be possible already, but could 1696 # be better integrated to make applyMapper() obsolete). 1697 # 1698 # 1699 # Perform distortion correction on __init__(). The copy contructor 1700 # implementation should move into a separate classmethod. 1701 # 1702 # Think about implementing the current 'clever' attributes in terms of one-time 1703 # properties as suggested by Fernando on nipy-devel. 1704 1705 # ... 1706 1707 from mvpa.misc.state import ClassWithCollections, Collection 1708 from mvpa.misc.attributes import SampleAttribute, FeatureAttribute, \ 1709 DatasetAttribute 1710 1711 # Remaining public interface of Dataset
1712 -class _Dataset(ClassWithCollections):
 1713      """The successor of Dataset.
 1714      """
 1715      # placeholder for all three basic collections of a Dataset
 1716      # put here to be able to check whether the AttributesCollector already
 1717      # instantiated a particular collection
 1718      # XXX maybe it should not do this at all for Dataset
 1719      sa = None
 1720      fa = None
 1721      dsa = None
 1722  
 1723      # storage of samples in a plain NumPy array for fast access
 1724      samples = None
 1725  
1726 - def __init__(self, samples, sa=None, fa=None, dsa=None):
1727 """ 1728 This is the generic internal constructor. Its main task is to allow 1729 for a maximum level of customization during dataset construction, 1730 including fast copy construction. 1731 1732 Parameters 1733 ---------- 1734 samples : ndarray 1735 Data samples. 1736 sa : Collection 1737 Samples attributes collection. 1738 fa : Collection 1739 Features attributes collection. 1740 dsa : Collection 1741 Dataset attributes collection. 1742 """ 1743 # init base class 1744 ClassWithCollections.__init__(self) 1745 1746 # Internal constructor -- users focus on init* Methods 1747 1748 # Every dataset needs data (aka samples), completely data-driven 1749 # analyses might not even need labels, so this is the only mandatory 1750 # argument 1751 # XXX add checks 1752 self.samples = samples 1753 1754 # Everything else in a dataset (except for samples) is organized in 1755 # collections 1756 # copy attributes from source collections (scol) into target 1757 # collections (tcol) 1758 for scol, tcol in ((sa, self.sa), 1759 (fa, self.fa), 1760 (dsa, self.dsa)): 1761 # make sure we have the target collection 1762 if tcol is None: 1763 # XXX maybe use different classes for the collections 1764 # but currently no reason to do so 1765 tcol = Collection(owner=self) 1766 1767 # transfer the attributes 1768 if not scol is None: 1769 for name, attr in scol.items.iteritems(): 1770 # this will also update the owner of the attribute 1771 # XXX discuss the implications of always copying 1772 tcol.add(copy.copy(attr))
1773 1774 1775 @classmethod
1776 - def initSimple(klass, samples, labels, chunks):
1777 # use Numpy convention 1778 """ 1779 One line summary. 1780 1781 Long description. 1782 1783 Parameters 1784 ---------- 1785 samples : ndarray 1786 The two-dimensional samples matrix. 1787 labels : ndarray 1788 chunks : ndarray 1789 1790 Returns 1791 ------- 1792 blah blah 1793 1794 Notes 1795 ----- 1796 blah blah 1797 1798 See Also 1799 -------- 1800 blah blah 1801 1802 Examples 1803 -------- 1804 blah blah 1805 """ 1806 # Demo user contructor 1807 1808 # compile the necessary samples attributes collection 1809 labels_ = SampleAttribute(name='labels') 1810 labels_.value = labels 1811 chunks_ = SampleAttribute(name='chunks') 1812 chunks_.value = chunks 1813 1814 # feels strange that one has to give the name again 1815 # XXX why does items have to be a dict when each samples 1816 # attr already knows its name 1817 sa = Collection(items={'labels': labels_, 'chunks': chunks_}) 1818 1819 # common checks should go into __init__ 1820 return klass(samples, sa=sa)
1821 1822
1823 - def getNSamples( self ):
1824 """Currently available number of patterns. 1825 """ 1826 return self.samples.shape[0]
1827 1828
1829 - def getNFeatures( self ):
1830 """Number of features per pattern. 1831 """ 1832 return self.samples.shape[1]
1833 1834 1835 # 1836 # @property 1837 # def idhash(self): 1838 # pass 1839 # 1840 # 1841 # def idsonboundaries(self, prior=0, post=0, 1842 # attributes_to_track=['labels', 'chunks'], 1843 # affected_labels=None, 1844 # revert=False): 1845 # pass 1846 # 1847 # 1848 # def summary(self, uniq=True, stats=True, idhash=False, lstats=True, 1849 # maxc=30, maxl=20): 1850 # pass 1851 # 1852 # 1853 # def summary_labels(self, maxc=30, maxl=20): 1854 # pass 1855 # 1856 # 1857 # def __iadd__(self, other): 1858 # pass 1859 # 1860 # 1861 # def __add__( self, other ): 1862 # pass 1863 # 1864 # 1865 # def copy(self): 1866 # pass 1867 # 1868 # 1869 # def selectFeatures(self, ids=None, sort=True, groups=None): 1870 # pass 1871 # 1872 # 1873 # def applyMapper(self, featuresmapper=None, samplesmapper=None, 1874 # train=True): 1875 # pass 1876 # 1877 # 1878 # def selectSamples(self, ids): 1879 # pass 1880 # 1881 # 1882 # def index(self, *args, **kwargs): 1883 # pass 1884 # 1885 # 1886 # def select(self, *args, **kwargs): 1887 # pass 1888 # 1889 # 1890 # def where(self, *args, **kwargs): 1891 # pass 1892 # 1893 # 1894 # def __getitem__(self, *args): 1895 # pass 1896 # 1897 # 1898 # def permuteLabels(self, status, perchunk=True, assure_permute=False): 1899 # pass 1900 # 1901 # 1902 # def getRandomSamples(self, nperlabel): 1903 # pass 1904 # 1905 # 1906 # def getLabelsMap(self): 1907 # pass 1908 # 1909 # 1910 # def setLabelsMap(self, lm): 1911 # pass 1912 # 1913 # 1914 # def setSamplesDType(self, dtype): 1915 # pass 1916 # 1917 # 1918 # def defineFeatureGroups(self, definition): 1919 # pass 1920 # 1921 # 1922 # def convertFeatureIds2FeatureMask(self, ids): 1923 # pass 1924 # 1925 # 1926 # def convertFeatureMask2FeatureIds(self, mask): 1927 # pass 1928