"""
csv.py - read/write/investigate CSV files
"""

import re
from _csv import Error, __version__, writer, reader, register_dialect, \
                 unregister_dialect, get_dialect, list_dialects, \
                 field_size_limit, \
                 QUOTE_MINIMAL, QUOTE_ALL, QUOTE_NONNUMERIC, QUOTE_NONE, \
                 __doc__
from _csv import Dialect as _Dialect

from collections import OrderedDict
from io import StringIO

__all__ = ["QUOTE_MINIMAL", "QUOTE_ALL", "QUOTE_NONNUMERIC", "QUOTE_NONE",
           "Error", "Dialect", "__doc__", "excel", "excel_tab",
           "field_size_limit", "reader", "writer",
           "register_dialect", "get_dialect", "list_dialects", "Sniffer",
           "unregister_dialect", "__version__", "DictReader", "DictWriter",
           "unix_dialect"]

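# A quick usage sketch (illustrative only, not executed on import):
#
#     rows = list(reader(StringIO('a,b\r\n1,2\r\n')))    # [['a', 'b'], ['1', '2']]
#     out = StringIO()
#     writer(out).writerow(['x', 'y'])                   # out now holds 'x,y\r\n'
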
class Dialect:
    """Describe a CSV dialect.

    This must be subclassed (see csv.excel). Valid attributes are:
    delimiter, quotechar, escapechar, doublequote, skipinitialspace,
    lineterminator, quoting.

    """
    _name = ""
    _valid = False
    # placeholders
    delimiter = None
    quotechar = None
    escapechar = None
    doublequote = None
    skipinitialspace = None
    lineterminator = None
    quoting = None

    def __init__(self):
        if self.__class__ != Dialect:
            self._valid = True
        self._validate()

    def _validate(self):
        try:
            _Dialect(self)
        except TypeError as e:
            # Re-raise as csv.Error so callers need only catch one
            # exception type (kept for compatibility with py2.3).
            raise Error(str(e))

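# A minimal subclass sketch (illustrative; "semicolon" is a hypothetical
# name, not a dialect this module registers):
#
#     class semicolon(Dialect):
#         delimiter = ';'
#         quotechar = '"'
#         doublequote = True
#         skipinitialspace = False
#         lineterminator = '\r\n'
#         quoting = QUOTE_MINIMAL
#     register_dialect("semicolon", semicolon)
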
class excel(Dialect):
    """Describe the usual properties of Excel-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\r\n'
    quoting = QUOTE_MINIMAL
register_dialect("excel", excel)

class excel_tab(excel):
    """Describe the usual properties of Excel-generated TAB-delimited files."""
    delimiter = '\t'
register_dialect("excel-tab", excel_tab)

class unix_dialect(Dialect):
    """Describe the usual properties of Unix-generated CSV files."""
    delimiter = ','
    quotechar = '"'
    doublequote = True
    skipinitialspace = False
    lineterminator = '\n'
    quoting = QUOTE_ALL
register_dialect("unix", unix_dialect)

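# Registered names can stand in for dialect classes (illustrative;
# 'data.csv' is a hypothetical file):
#
#     with open('data.csv', newline='') as f:
#         rows = list(reader(f, dialect="unix"))
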

class DictReader:
    def __init__(self, f, fieldnames=None, restkey=None, restval=None,
                 dialect="excel", *args, **kwds):
        self._fieldnames = fieldnames   # list of keys for the dict
        self.restkey = restkey          # key to catch long rows
        self.restval = restval          # default value for short rows
        self.reader = reader(f, dialect, *args, **kwds)
        self.dialect = dialect
        self.line_num = 0

    def __iter__(self):
        return self

    @property
    def fieldnames(self):
        if self._fieldnames is None:
            try:
                self._fieldnames = next(self.reader)
            except StopIteration:
                pass
        self.line_num = self.reader.line_num
        return self._fieldnames

    @fieldnames.setter
    def fieldnames(self, value):
        self._fieldnames = value

    def __next__(self):
        if self.line_num == 0:
            # Used only for its side effect of reading the header row.
            self.fieldnames
        row = next(self.reader)
        self.line_num = self.reader.line_num

        # unlike the basic reader, we prefer not to return blanks,
        # because we will typically wind up with a dict full of None
        # values
        while row == []:
            row = next(self.reader)
        d = OrderedDict(zip(self.fieldnames, row))
        lf = len(self.fieldnames)
        lr = len(row)
        if lf < lr:
            d[self.restkey] = row[lf:]
        elif lf > lr:
            for key in self.fieldnames[lr:]:
                d[key] = self.restval
        return d

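# A DictReader usage sketch (illustrative only, not executed on import):
#
#     f = StringIO("name,age\r\nAlice,30\r\n")
#     for row in DictReader(f):
#         print(row)    # OrderedDict([('name', 'Alice'), ('age', '30')])
#
# Short rows are padded with restval; extra fields beyond fieldnames are
# collected in a list under restkey.
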

class DictWriter:
    def __init__(self, f, fieldnames, restval="", extrasaction="raise",
                 dialect="excel", *args, **kwds):
        self.fieldnames = fieldnames    # list of keys for the dict
        self.restval = restval          # for writing short dicts
        if extrasaction.lower() not in ("raise", "ignore"):
            raise ValueError("extrasaction (%s) must be 'raise' or 'ignore'"
                             % extrasaction)
        self.extrasaction = extrasaction
        self.writer = writer(f, dialect, *args, **kwds)

    def writeheader(self):
        header = dict(zip(self.fieldnames, self.fieldnames))
        self.writerow(header)

    def _dict_to_list(self, rowdict):
        if self.extrasaction == "raise":
            wrong_fields = rowdict.keys() - self.fieldnames
            if wrong_fields:
                raise ValueError("dict contains fields not in fieldnames: "
                                 + ", ".join([repr(x) for x in wrong_fields]))
        return (rowdict.get(key, self.restval) for key in self.fieldnames)

    def writerow(self, rowdict):
        return self.writer.writerow(self._dict_to_list(rowdict))

    def writerows(self, rowdicts):
        return self.writer.writerows(map(self._dict_to_list, rowdicts))

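# A DictWriter usage sketch (illustrative only, not executed on import):
#
#     out = StringIO()
#     w = DictWriter(out, fieldnames=["name", "age"], restval="?")
#     w.writeheader()
#     w.writerow({"name": "Alice"})    # 'age' is missing, so "?" is written
#
# Keys not listed in fieldnames raise ValueError unless
# extrasaction="ignore" is passed.
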
# Guard Sniffer's type checking against builds that exclude complex()
try:
    complex
except NameError:
    complex = float

class Sniffer:
    '''
    "Sniffs" the format of a CSV file (i.e. delimiter, quotechar)
    Returns a Dialect object.
    '''
    def __init__(self):
        # in case there is more than one possible delimiter
        self.preferred = [',', '\t', ';', ' ', ':']


    def sniff(self, sample, delimiters=None):
        """
        Returns a dialect corresponding to the sample; raises Error
        if the delimiter cannot be determined.
        """

        quotechar, doublequote, delimiter, skipinitialspace = \
                   self._guess_quote_and_delimiter(sample, delimiters)
        if not delimiter:
            delimiter, skipinitialspace = self._guess_delimiter(sample,
                                                                delimiters)

        if not delimiter:
            raise Error("Could not determine delimiter")

        class dialect(Dialect):
            _name = "sniffed"
            lineterminator = '\r\n'
            quoting = QUOTE_MINIMAL
            # escapechar = ''

        dialect.doublequote = doublequote
        dialect.delimiter = delimiter
        # _csv.reader won't accept a quotechar of ''
        dialect.quotechar = quotechar or '"'
        dialect.skipinitialspace = skipinitialspace

        return dialect

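    # Example (illustrative only, not executed on import):
    #
    #     dialect = Sniffer().sniff('a,b,c\r\n1,2,3\r\n')
    #     rows = list(reader(StringIO('a,b,c\r\n1,2,3\r\n'), dialect))
    #
    # sniff() returns a Dialect subclass, so the result can be passed
    # anywhere a dialect is accepted.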

    def _guess_quote_and_delimiter(self, data, delimiters):
        """
        Looks for text enclosed between two identical quotes
        (the probable quotechar) which are preceded and followed
        by the same character (the probable delimiter).
        For example:
                         ,'some text',
        The quote character that appears most often wins; likewise for
        the delimiter. If there is no quotechar, the delimiter can't be
        determined this way.
        """
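
        # Worked illustration (comment only): in a sample containing
        #     ,'some text',
        # the first pattern below matches with delim=',', space='' and
        # quote="'"; tallying matches across the sample selects the most
        # frequent quote character and delimiter.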

        matches = []
        for restr in (r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?P=delim)', # ,".*?",
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?P<delim>[^\w\n"\'])(?P<space> ?)',   #  ".*?",
                      r'(?P<delim>[^\w\n"\'])(?P<space> ?)(?P<quote>["\']).*?(?P=quote)(?:$|\n)',   # ,".*?"
                      r'(?:^|\n)(?P<quote>["\']).*?(?P=quote)(?:$|\n)'):                            #  ".*?" (no delim, no space)
            regexp = re.compile(restr, re.DOTALL | re.MULTILINE)
            matches = regexp.findall(data)
            if matches:
                break

        if not matches:
            # (quotechar, doublequote, delimiter, skipinitialspace)
            return ('', False, None, 0)
        quotes = {}
        delims = {}
        spaces = 0
        groupindex = regexp.groupindex
        for m in matches:
            n = groupindex['quote'] - 1
            key = m[n]
            if key:
                quotes[key] = quotes.get(key, 0) + 1
            try:
                n = groupindex['delim'] - 1
                key = m[n]
            except KeyError:
                continue
            if key and (delimiters is None or key in delimiters):
                delims[key] = delims.get(key, 0) + 1
            try:
                n = groupindex['space'] - 1
            except KeyError:
                continue
            if m[n]:
                spaces += 1

        quotechar = max(quotes, key=quotes.get)

        if delims:
            delim = max(delims, key=delims.get)
            skipinitialspace = delims[delim] == spaces
            if delim == '\n': # most likely a file with a single column
                delim = ''
        else:
            # there is *no* delimiter, it's a single column of quoted data
            delim = ''
            skipinitialspace = 0

        # if we see an extra quote between delimiters, we've got a
        # double quoted format
        dq_regexp = re.compile(
                               r"((%(delim)s)|^)\W*%(quote)s[^%(delim)s\n]*%(quote)s[^%(delim)s\n]*%(quote)s\W*((%(delim)s)|$)" % \
                               {'delim':re.escape(delim), 'quote':quotechar}, re.MULTILINE)

        if dq_regexp.search(data):
            doublequote = True
        else:
            doublequote = False

        return (quotechar, doublequote, delim, skipinitialspace)


    def _guess_delimiter(self, data, delimiters):
        """
        The delimiter /should/ occur the same number of times on
        each row. However, due to malformed data, it may not. We don't want
        an all or nothing approach, so we allow for small variations in this
        number.
          1) build a table of the frequency of each character on every line.
          2) build a table of frequencies of this frequency (meta-frequency?),
             e.g. 'x occurred 5 times in 10 rows, 6 times in 1000 rows,
             7 times in 2 rows'
          3) use the mode of the meta-frequency to determine the /expected/
             frequency for that character
          4) find out how often the character actually meets that goal
          5) the character that best meets its goal is the delimiter
        For performance reasons, the data is evaluated in chunks, so it can
        try and evaluate the smallest portion of the data possible, evaluating
        additional chunks as necessary.
        """

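        # Worked illustration (comment only): for the two rows
        #     a,b,c
        #     d,e,f
        # ',' occurs exactly twice on each line, so its meta-frequency
        # table is {2: 2}; the mode (2, 2) holds on 100% of rows, making
        # ',' the strongest delimiter candidate.
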
        data = list(filter(None, data.split('\n')))

        ascii = [chr(c) for c in range(127)] # 7-bit ASCII

        # build frequency tables
        chunkLength = min(10, len(data))
        iteration = 0
        charFrequency = {}
        modes = {}
        delims = {}
        start, end = 0, chunkLength
        while start < len(data):
            iteration += 1
            for line in data[start:end]:
                for char in ascii:
                    metaFrequency = charFrequency.get(char, {})
                    # must count even if frequency is 0
                    freq = line.count(char)
                    # value is the mode
                    metaFrequency[freq] = metaFrequency.get(freq, 0) + 1
                    charFrequency[char] = metaFrequency

            for char in charFrequency.keys():
                items = list(charFrequency[char].items())
                if len(items) == 1 and items[0][0] == 0:
                    continue
                # get the mode of the frequencies
                if len(items) > 1:
                    modes[char] = max(items, key=lambda x: x[1])
                    # adjust the mode - subtract the sum of all
                    # other frequencies
                    items.remove(modes[char])
                    modes[char] = (modes[char][0], modes[char][1]
                                   - sum(item[1] for item in items))
                else:
                    modes[char] = items[0]

            # build a list of possible delimiters
            modeList = modes.items()
            total = float(min(chunkLength * iteration, len(data)))
            # (rows of consistent data) / (number of rows) = 100%
            consistency = 1.0
            # minimum consistency threshold
            threshold = 0.9
            while len(delims) == 0 and consistency >= threshold:
                for k, v in modeList:
                    if v[0] > 0 and v[1] > 0:
                        if ((v[1]/total) >= consistency and
                            (delimiters is None or k in delimiters)):
                            delims[k] = v
                consistency -= 0.01

            if len(delims) == 1:
                delim = list(delims.keys())[0]
                skipinitialspace = (data[0].count(delim) ==
                                    data[0].count("%c " % delim))
                return (delim, skipinitialspace)

            # analyze another chunkLength lines
            start = end
            end += chunkLength

        if not delims:
            return ('', 0)

        # if there's more than one, fall back to a 'preferred' list
        if len(delims) > 1:
            for d in self.preferred:
                if d in delims.keys():
                    skipinitialspace = (data[0].count(d) ==
                                        data[0].count("%c " % d))
                    return (d, skipinitialspace)

        # nothing else indicates a preference, pick the character that
        # dominates(?)
        items = [(v,k) for (k,v) in delims.items()]
        items.sort()
        delim = items[-1][1]

        skipinitialspace = (data[0].count(delim) ==
                            data[0].count("%c " % delim))
        return (delim, skipinitialspace)


    def has_header(self, sample):
        # Creates a dictionary of types of data in each column. If any
        # column is of a single type (say, integers), *except* for the first
        # row, then the first row is presumed to be labels. If the type
        # can't be determined, it is assumed to be a string in which case
        # the length of the string is the determining factor: if all of the
        # rows except for the first are the same length, it's a header.
        # Finally, a 'vote' is taken at the end for each column, adding or
        # subtracting from the likelihood of the first row being a header.

        rdr = reader(StringIO(sample), self.sniff(sample))

        header = next(rdr) # assume first row is header

        columns = len(header)
        columnTypes = {}
        for i in range(columns): columnTypes[i] = None

        checked = 0
        for row in rdr:
            # arbitrary number of rows to check, to keep it sane
            if checked > 20:
                break
            checked += 1

            if len(row) != columns:
                continue # skip rows that have irregular number of columns

            for col in list(columnTypes.keys()):

                for thisType in [int, float, complex]:
                    try:
                        thisType(row[col])
                        break
                    except (ValueError, OverflowError):
                        pass
                else:
                    # fallback to length of string
                    thisType = len(row[col])

                if thisType != columnTypes[col]:
                    if columnTypes[col] is None: # add new column type
                        columnTypes[col] = thisType
                    else:
                        # type is inconsistent, remove column from
                        # consideration
                        del columnTypes[col]

        # finally, compare results against first row and "vote"
        # on whether it's a header
        hasHeader = 0
        for col, colType in columnTypes.items():
            if type(colType) == type(0): # it's a length
                if len(header[col]) != colType:
                    hasHeader += 1
                else:
                    hasHeader -= 1
            else: # attempt typecast
                try:
                    colType(header[col])
                except (ValueError, TypeError):
                    hasHeader += 1
                else:
                    hasHeader -= 1

        return hasHeader > 0
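
# Putting Sniffer to work (illustrative only; 'data.csv' is a
# hypothetical file):
#
#     with open('data.csv', newline='') as f:
#         sample = f.read(1024)
#         f.seek(0)
#         sniffer = Sniffer()
#         rdr = reader(f, sniffer.sniff(sample))
#         if sniffer.has_header(sample):
#             next(rdr)    # skip the label row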
|---|