#!/usr/bin/env python
#-*- coding:utf-8 -*-
import os,sys
from wwpy.util.Threads import ThreadPool
from wwpy.file.File import File
from wwpy.io.Reader import reader
from wwpy.io.Writer import Writer
from wwpy.util.Error import ImplementError

class BigData(object):
    """Lightweight map/reduce-style processor over splittable sources.

    Reads records from ``input`` (file paths, urls or sqls understood by
    ``wwpy.file.File``), maps each line through :meth:`mapper`, optionally
    groups by key and reduces through :meth:`reducer`, then writes results
    to ``output``.  Parts are processed concurrently on a ``ThreadPool``.
    Subclasses (or instances, via attribute assignment) provide the
    ``reader`` / ``mapper`` / ``reducer`` hooks.
    """

    def __init__(self, input=None, output=None, threads=4):
        # NOTE: `input` shadows the builtin but is kept for API compatibility.
        self.input = input        # source spec consumed by File(self.input)
        self.output = output      # destination path for results
        self.threads = threads    # worker count for the thread pools
        self._input_sep = '\t'    # field separator of the source data
        self._output_sep = '\t'   # field separator of written results
        self._line_sep = '\n'     # record terminator stripped on read
        self._is_split = False    # whether file sources get pre-split

    def input_sep(self, sep=None):
        """Get, and optionally set, the input field separator."""
        if sep is not None:
            self._input_sep = sep
        return self._input_sep

    def output_sep(self, sep=None):
        """Get, and optionally set, the output field separator."""
        if sep is not None:
            self._output_sep = sep
        return self._output_sep

    def line_sep(self, sep=None):
        """Get, and optionally set, the line terminator."""
        if sep is not None:
            self._line_sep = sep
        return self._line_sep

    def is_split(self, is_split=None):
        """Get, and optionally set, whether file sources are split into parts."""
        if is_split is not None:
            self._is_split = is_split
        return self._is_split

    def processor(self, key, value):
        """Per-line processing hook; delegates to :meth:`mapper`."""
        return self.mapper(key, value)

    def get_parts(self):
        """Return the list of parts (files/urls/sqls) to be processed.

        When splitting is enabled, each local file source is cut into
        smaller parts concurrently via ``wwpy.file.Spliter``.
        """
        source = File(self.input)
        if not self.is_split():
            return source.sources
        from wwpy.file.Spliter import Spliter

        def split_source(src):
            return Spliter(src).split()

        tp = ThreadPool(self.threads)
        for src in source.sources:
            # Urls/sqls cannot be split on disk; skip non-files.
            if os.path.isfile(src):
                tp.put(split_source, src)
        tp.wait()
        parts = []
        for out in tp.outs():
            parts.extend(out[-1])  # out[-1] holds the task's return value
        return parts

    def read(self):
        """Yield mapped records from every part, parts read concurrently."""
        tp = ThreadPool(self.threads)
        for part in self.get_parts():
            tp.put(self.read_part, part)
        tp.wait()
        for out in tp.outs():
            for line in out[-1]:
                yield line

    def _read_lines(self, part):
        """Raw lines of *part*: the user ``reader`` hook when implemented,
        otherwise the plain reader output."""
        r = reader.read(part)
        try:
            # reader() raises ImplementError when not overridden; fall
            # back to the unprocessed lines then.  Narrowed from a bare
            # except so KeyboardInterrupt/SystemExit still propagate.
            return self.reader(r).lines
        except Exception:
            return r.lines

    @staticmethod
    def _is_generator(obj):
        """True when *obj* is an iterator of records (py2 `next`, py3 `__next__`)."""
        return hasattr(obj, 'next') or hasattr(obj, '__next__')

    def read_part(self, part):
        """Map every line of *part* and return the collected results."""
        line_sep = self.line_sep()
        line_results = []
        for line in self._read_lines(part):
            if str(line).endswith(line_sep):
                # Strip the full terminator (the old `line[:-1]` broke
                # for multi-character separators such as '\r\n').
                line = line[:-len(line_sep)]
            line = self.processor(part, line)
            if line is None:
                continue
            if self._is_generator(line):
                # A mapper may emit several records by yielding.
                for fields in line:
                    line_results.append(fields)
            else:
                line_results.append(line)
        return line_results

    def read_to_file(self):
        """Convenience wrapper: write everything to ``self.output``."""
        self.read_to_files()

    def get_fkey(self, line=None):
        """File-routing key for *line*; default routes everything to 'all'."""
        return 'all'

    def read_to_files(self, key_files=None):
        """Process all parts, writing each record to the file selected by
        :meth:`get_fkey`.

        key_files maps fkey -> path or open writable file object.
        Defaults to ``{'all': self.output}``.  Files opened here are
        closed before returning; caller-supplied file objects are not.
        """
        if key_files is None:
            key_files = {'all': self.output}
        if not isinstance(key_files, dict):
            # BUG FIX: raising a string literal is illegal; raise a real
            # exception instead.
            raise TypeError("key_files's type must be dict.")
        key_fs = {}
        opened = []  # files we opened ourselves, to close when done
        for fkey, key_file in key_files.items():
            # Duck-type writable objects instead of the py2-only
            # `isinstance(key_file, file)` check.
            if hasattr(key_file, 'write'):
                key_fs[fkey] = key_file
            else:
                f = open(key_file, 'w')
                key_fs[fkey] = f
                opened.append(f)
        try:
            tp = ThreadPool(self.threads)
            for part in self.get_parts():
                tp.put(self.read_part_to_files, part, key_fs)
            tp.wait()
        finally:
            # BUG FIX: previously the opened files were never closed.
            for f in opened:
                f.close()

    def read_part_to_files(self, part, key_fs):
        """Map every line of *part* and write each result to its file."""
        line_sep = self.line_sep()
        writer = Writer()
        writer.sep(self.output_sep())  # hoisted: separator is loop-invariant
        for line in self._read_lines(part):
            if str(line).endswith(line_sep):
                line = line[:-len(line_sep)]
            line = self.processor(part, line)
            if line is None:
                continue
            f = key_fs.get(self.get_fkey(line), None)
            if f is not None:
                writer.write_line(f, line)

    def reader(self, reader):
        """Hook: transform the raw reader before lines are taken."""
        raise ImplementError("reader func need be implemented.")

    def mapper(self, key, value):
        """Hook: map one (part, line) pair to a record, records, or None."""
        raise ImplementError("mapper func need be implemented.")

    def combiner(self, key, values):
        """Hook: optional local pre-aggregation (currently unused here)."""
        raise ImplementError("combiner func need be implemented.")

    def reducer(self, key, values):
        """Hook: reduce all values of one key to a record or records."""
        raise ImplementError("reducer func need be implemented.")

    def handler(self, results):
        """
        handle results into final results.
        """
        return results

    def load(self, **kwargs):
        """Load ``self.output`` into a database table via wwpy DBI.

        Recognised kwargs: ``db`` (default 'db_58_ib'), ``table``
        (required), ``table_fields``; the rest is forwarded to
        ``dbi.save``.  Exits the process when the table is missing and
        no table_fields were designed.
        """
        db = kwargs.pop('db', 'db_58_ib')
        table = kwargs.pop('table')
        table_fields = kwargs.pop('table_fields', None)
        from wwpy.db.DBI import DBI, dbc
        dbi = DBI(**dbc[db])
        if (not dbi.has_table(table)) and table_fields is None:
            # print() form works on py2 (single arg) and py3 alike.
            print('No table %s or no designed table_fields.' % table)
            sys.exit()
        sep = self.output_sep()
        dbi.save(self.output, table, sep=sep, table_fields=table_fields, **kwargs)
        dbi.close()

    @property
    def has_reducer(self):
        """True when :meth:`reducer` is implemented (probed by calling it)."""
        try:
            self.reducer('1', [])
            return True
        except Exception:  # narrowed from bare except
            return False

    def do(self, stream):
        """Group ``(key, value)`` pairs by key, reduce, and hand results
        to :meth:`handler`.  Without a reducer the stream passes through."""
        if not self.has_reducer:
            return self.handler(stream)
        info = {}
        for key, value in stream:
            info.setdefault(key, []).append(value)
        reduce_results = []
        # items() works on py2 (list) and py3 (view); iteritems() did not.
        for key, values in info.items():
            result = self.reducer(key, values)
            if result is None:
                continue
            if self._is_generator(result):
                # A reducer may emit several (key, value) pairs by yielding.
                for key, value in result:
                    reduce_results.append((key, value))
            else:
                reduce_results.append(result)
        return self.handler(reduce_results)

    def write(self, stream):
        """Run :meth:`do` over *stream* and write results to ``self.output``."""
        writer = Writer()
        writer.output(self.output)
        writer.sep(self.output_sep())
        writer.info(self.do(stream))
        writer.write()

    def run(self):
        """Full pipeline: read -> map -> (reduce) -> write."""
        self.write(self.read())

    def scan(self):
        """Print the output file line by line, without terminators."""
        with open(self.output) as f:  # was leaked before; now closed
            for line in f:
                print(line[:-1])
            
def demo1():
    """Demo: sum click counts from yesterday's hourly url sources."""
    from wwpy.util.Time import Time

    def get_urls(date=Time.yesterday('')):
        # One url per hour of the day, hours zero-padded to two digits.
        base = "http://10.5.12.167:50075/streamFile/dsap/rawdata/jzcpclog/%s/jingzhun_clicks_%s%s"
        hours = ['0%s' % h if h < 10 else h for h in range(24)]
        return [base % (date, date, h) for h in hours]

    bd = BigData()
    bd.input = get_urls()
    bd.output = 'test_url.txt'
    bd.reader = lambda r: r.split('\001').group([7, 6]).sum(25)
    bd.mapper = lambda key, value: (tuple(value[0:2]), value[2])
    bd.reducer = lambda key, values: (key, sum(values))
    bd.run()
    
def demo2():
    """Demo: aggregate monthly click counts from sql sources and shard
    the output into three files keyed by hashed city."""
    sql_tpl = "select city1,sum(click_count) from db_58_ib.hy_click_info_%s group by city1"
    bd = BigData()
    bd.input = [sql_tpl % ym for ym in (201307, 201308, 201309)]
    bd.output = 'test_sql.txt'
    bd.mapper = lambda key, value: (value[0], value[1])
    bd.reducer = lambda key, values: (key, sum(values))
    bd.get_fkey = lambda line: hash(line[0]) % 3
    bd.read_to_files({0: 'ts0.txt', 1: 'ts1.txt', 2: 'ts2.txt'})
    #bd.run()
    #bd.scan()
    
# Script entry point: run the sql-sourced demo when executed directly.
if __name__ == '__main__':
    demo2()
    
