
import glob
import sys
from chardet.universaldetector import UniversalDetector
import pepe.p_filesystem.dfstats as dfs

def detect_encoding(filepath):
    '''
        Detect the character encoding of a file with chardet.

        Usage:
            import pepe.p_utils.detect_encoding as de
            print de.detect_encoding(f)['encoding']

        Returns a dict containing the file path ('f'), the dfstats file
        info (including 'size'), and chardet's result keys
        ('encoding', 'confidence').
    '''
    # Never feed more than this many bytes of a large file to the detector.
    CHUNKSIZE = 200000

    r = {'f': filepath}
    detector = UniversalDetector()  # constructor already resets its state

    finfo = dfs.dfstats(filepath, options=[])
    r.update(finfo)

    if r['size'] > CHUNKSIZE:
        # Big file: sample only the first CHUNKSIZE bytes in one read.
        with open(filepath, 'rb') as fi:
            detector.feed(fi.read(CHUNKSIZE))
    else:
        # Small file: feed line by line, stopping as soon as chardet is
        # confident.  'with' fixes the old leak where the file() handle
        # was never closed.
        with open(filepath, 'rb') as fi:
            for line in fi:
                detector.feed(line)
                if detector.done:
                    break

    detector.close()
    r.update(detector.result)
    # print(...) with a single %-formatted argument parses the same way
    # under Python 2 and Python 3.
    print("%s\t%s\t%s" % (r['f'],
                          r['encoding'],
                          str(r['confidence'] * 100) + '%'))
    return r

if __name__ == "__main__":
    # CLI entry point: detect the encoding of the single file given as
    # argv[1]; otherwise show usage taken from the function docstring.
    if len(sys.argv) == 2:
        detect_encoding(sys.argv[1])
    else:
        print(detect_encoding.__doc__)
