#!/usr/bin/python
# -*- coding: utf-8 -*-
import csv
import os
import threading
import sys
import struct
from Queue import Queue, Empty
from util.logger import error_log, warn_log

from util.opends import DS
from util.app_global import cache_path
from util.logger import process_log

__author__ = 'xiwei'

# Field type name -> (struct format character, size in bytes).
# Used by GEA.read() to unpack integer fields from the binary file
# (struct's default, native byte order).
TYPE_MAPPER = {
    'DWORD': ('I', 4),   # 32-bit unsigned (Win32 DWORD)
    'WORD': ('H', 2),    # 16-bit unsigned (Win32 WORD)
    'uint8': ('B', 1),
    'uint16': ('H', 2),
    'uint32': ('I', 4),
    'uint64': ('Q', 8),
}


def quote(row):
    """Wrap every non-None cell of `row` in single quotes.

    Returns a list (rather than a lazy `map` object) so the result can
    be indexed and reused on both Python 2 and Python 3.
    """
    return [None if x is None else "'%s'" % x for x in row]


def dequote(row):
    """Inverse of quote(): strip the surrounding quote characters from
    each cell, mapping empty strings to None.

    Returns a list (rather than a lazy `map` object) so the result can
    be indexed and reused on both Python 2 and Python 3.
    """
    return [None if x == '' else x[1:-1] for x in row]


class CSVSync(object):
    """
        Row reader over a cached CSV file.

        The file is treated as a disposable cache: close() (also invoked
        from __del__) deletes it from disk.
    """

    def __init__(self, filename=None, delimiter=','):
        """
        Args:
            filename (str): path to an existing CSV file.
            delimiter (str): field delimiter used when parsing.

        Raises:
            Exception: when the file does not exist.
        """
        if not os.path.exists(filename):
            raise Exception(u'File %s is not exists.' % filename)
        self.filename = os.path.abspath(filename)
        self.delimiter = delimiter
        self.position = 0
        # Distinct decode errors already reported; each is logged only once.
        self.error = []
        # Lift the default 128 KiB field size limit for very wide cells.
        csv.field_size_limit(sys.maxsize)
        self.length = self.__len__()

    def __len__(self):
        """Count physical lines in the file."""
        with open(self.filename, 'r') as f:
            return sum(1 for _ in f)

    def seek(self, position):
        # NOTE(review): position is recorded but never consumed by fetch();
        # callers pass a `breakpoint` to fetch() instead — confirm intent.
        self.position = position

    def _row_process(self, line):
        # Strip quoting first, then coerce every cell to unicode.
        return [self.utf8(cell) for cell in dequote(line)]

    def utf8(self, data):
        """Coerce one cell to unicode; on decode failure log once and
        return None instead of raising."""
        try:
            return u'%s' % data if data is not None else None
        except Exception as e:
            if str(e) not in self.error:
                self.error.append(str(e))
                warn_log().warn('Data utf8 decode error and has been ignored. reason:%s' % str(e))
            return None

    def fetch(self, breakpoint=-1):
        """
            Iterate processed rows, optionally resuming after a breakpoint.
        Args:
            breakpoint (int): number of leading rows to skip; <= 0 means
                start from the first row.

        Yields:
            list: row cells ('' -> None, surrounding quotes stripped).
        """
        with open(self.filename, 'r') as stream:
            # Bug fix: honour the configured delimiter (previously the
            # reader silently used csv's default ',').
            reader = csv.reader(stream, delimiter=self.delimiter)
            for line in reader:
                if breakpoint <= 0:
                    yield self._row_process(line)
                else:
                    breakpoint -= 1

    def sync(self):
        # Placeholder kept for interface compatibility.
        pass

    def __iter__(self):
        # Previously returned None, which made iteration raise TypeError;
        # delegate to fetch() so the object is directly iterable.
        return self.fetch()

    def close(self):
        # getattr: __init__ may raise before self.filename is assigned,
        # and __del__ still calls close() on the half-built instance.
        filename = getattr(self, 'filename', None)
        if filename and os.path.exists(filename):
            os.remove(filename)

    def __del__(self):
        self.close()


class GEA(object):
    """
        Reader for GEA binary herd-management ('.dat') files.

        Records are located through header fields at fixed byte offsets and
        individual columns are unpacked with struct via read(). When
        constructed with db_info/user_info, the parsed rows can be pushed
        to a DS backend (base_sync / data_sync / sync).
    """

    # Field name -> (type name in TYPE_MAPPER, byte offset inside a
    # base-info record).
    base_fields_name = {
        'CowId': ('uint16', 0),
        'RowNo': ('uint16', 4),
        'FixNo': ('uint16', 6),
        'Sensor': ('uint64', 8),
        'StatusCode': ('uint8', 0x10),
        'LeaveDate': ('uint16', 0x14),
        'Shed': ('uint16', 0x28),
        'BuyDate': ('uint16', 0x48)
    }

    # Field name -> (type name, byte offset inside a data record).
    # 'CowId' has no offset: it is resolved through the index table via
    # cow_id() instead of being stored in the record itself.
    data_fields_name = {
        'CowId': None,
        'Date': ('uint16', 0),
        'Time': ('uint16', 2),
        'Milk': ('uint16', 4),
        'ErrorCode': ('uint16', 6),
        'AverageFlow': ('uint16', 8),
        'Conductivity': ('uint16', 0x0a),
        'MilkTime': ('uint16', 0x0c),
        'RepeatTimes': ('uint16', 0x10),
        'CowPos': ('uint16', 0x12),
    }

    def __init__(self, filename=None, db_info=None, user_info=None):
        """
            A GEA file helper.
        Args:
            filename (str): the path to the GEA file (standalone mode).
            db_info (dict): optional; expects keys 'file' (path) and
                'schema' (list of field dicts). Enables DS sync mode.
            user_info (dict): DS credentials with 'domain', 'username' and
                'password'; required when db_info is given.
        """
        self.base_headers = self.base_fields_name.keys()
        self.data_headers = self.data_fields_name.keys()

        if not db_info:
            self.filename = filename
            name = os.path.basename(self.filename)
        else:
            self.filename = db_info.get('file')
            name = os.path.basename(self.filename)
            # Keep only the schema fields this reader knows how to extract.
            self.base_schema = filter(lambda field: field.get('name') in self.base_headers, db_info.get('schema'))
            self.data_schema = filter(lambda field: field.get('name') in self.data_headers, db_info.get('schema'))
            self.ds = DS(name, domain=user_info['domain'], username=user_info['username'], password=user_info['password'])
        self.base_name = 'BaseInfo'
        # Data table is named after the file, extension stripped.
        self.data_name = name.split('.')[0]
        self.fd = open(self.filename, 'rb')
        self.max_size = os.stat(self.filename).st_size

    def read(self, data_type, offset):
        """
            Read one integer field from the GEA file via struct.unpack.
        Args:
            data_type (str): type name of the data, a key of TYPE_MAPPER.
            offset (int): byte offset from the beginning of the file.

        Returns:
            int: the unpacked value, or None when unpacking fails
                (e.g. a read past end of file).
        """
        self.fd.seek(offset)
        fmt, size = TYPE_MAPPER[data_type]
        try:
            ret = struct.unpack(fmt, self.fd.read(size))[0]
        except struct.error as e:
            error_log().error("Error when unpack '%s' at '%s', reason: %s" % (TYPE_MAPPER[data_type], offset, e))
            return None
        return ret

    def record_count(self):
        """
            Get base info record count (header field at 0x1C).
        Returns:
            int
        """
        return self.read('DWORD', 0x0000001C)

    def page_size(self):
        """
            Get page size (header field at 0x10).
        Returns:
            int
        """
        return self.read('DWORD', 0x00000010)

    def cow_id(self, index):
        """
            Get cow id by index, via the index table at 0x2A0.
        Args:
            index (int): the index of the cow.

        Returns:
            int: the cow id, or 0 when the index table pointer is zero.
        """
        index_list_offset = int(self.read('DWORD', 0x000002A0))
        if index_list_offset == 0:
            return 0
        return self.read('DWORD', index_list_offset + index * 4)

    def get_record_address(self, index):
        """Absolute file offset of record `index` (pages are 1-based:
        page 0 holds the header)."""
        record_offset = self.read('DWORD', 0x0C)
        return record_offset + self.page_size() * (index + 1)

    def base_delta(self):
        """Offset of the base-info section inside a record page, derived
        from the header word at 0x08."""
        off_0x0008 = self.read('WORD', 0x0008)
        return self.read('DWORD', (off_0x0008 * 3 + 0x06) * 0x08)

    def data_delta(self):
        """Offset of the data section inside a record page, derived from
        the header word at 0x08."""
        off_0x0008 = self.read('WORD', 0x0008)
        return self.read('DWORD', (off_0x0008 * 3 + 0x0f) * 0x08)

    def base_record(self, name, index):
        """Read one base-info column `name` for record `index`."""
        record_offset = self.get_record_address(index) + self.base_delta()
        return self.read(self.base_fields_name[name][0], record_offset + self.base_fields_name[name][1])

    def data_record(self, name, index):
        """Read one data column `name` for record `index`; 'CowId' comes
        from the index table rather than the record body."""
        if name == 'CowId':
            return self.cow_id(index)
        record_offset = self.get_record_address(index) + self.data_delta()
        return self.read(self.data_fields_name[name][0], record_offset + self.data_fields_name[name][1])

    def data_record_iter(self, header, index):
        """
            Yield every data row belonging to record `index`, in header order.
        Args:
            header (iterable): column names (subset of data_fields_name).
            index (int): record index.

        Yields:
            list: one value per requested column.
        """
        record_offset = self.get_record_address(index) + self.data_delta()
        # A zero first word marks the end of this record's data run.
        while record_offset <= self.max_size and self.read('uint16', offset=record_offset) != 0:
            row = []
            for k in header:
                if k == 'CowId':
                    row.append(self.cow_id(index))
                else:
                    col = self.read(self.data_fields_name[k][0], record_offset + self.data_fields_name[k][1])
                    row.append(col)
            yield row
            # NOTE(review): fixed 18-byte stride, although the last declared
            # field (CowPos, uint16 at 0x12) would end at byte 20 — confirm
            # against the on-disk format.
            record_offset += 18

    def dump(self, base=None):
        """
            Dump base info or milking data into a cached CSV file.
        Args:
            base (bool): dump base info if true, data records otherwise.

        Returns:
            str: csv file path
        """
        if base:
            headers = self.base_headers
            record = self.base_record
            name = self.base_name
        else:
            headers = self.data_headers
            record = self.data_record
            name = self.data_name

        if not os.path.exists(cache_path):
            os.mkdir(cache_path)

        # Without db_info there is no DS session; fall back to a 'local'
        # cache folder instead of crashing on the missing self.ds.
        ds = getattr(self, 'ds', None)
        csv_file_path = os.path.join(cache_path, ds.ds_id if ds else 'local')
        if not os.path.exists(csv_file_path):
            os.mkdir(csv_file_path)
        csv_file = os.path.join(csv_file_path, name)
        fd = None
        process_log().info('Begin cache data to %s by %s.' % (csv_file, record.__func__))
        try:
            fd = open(csv_file, 'w')
            writer = csv.writer(fd)
            for i in xrange(0, self.record_count()):
                if base:
                    # One base-info row per record.
                    writer.writerow([record(k, i) for k in headers])
                else:
                    # A record may own several data rows.
                    for row in self.data_record_iter(headers, i):
                        writer.writerow(row)
        except Exception as e:
            # Best-effort: log instead of silently printing to stdout.
            error_log().error('Dump to %s failed, reason:%s' % (csv_file, e))
        finally:
            if fd:
                fd.close()
        return csv_file

    def test(self):
        # Manual sanity check of a few header fields.
        print('%s %s %s %s' % (self.record_count(), hex(self.page_size()),
                               hex(self.read('uint32', 0x000002a0)), hex(self.data_delta())))

    def _sync_rows(self, csv_file, name, schema, headers, label):
        """Load cached CSV rows into DS table `name` in batches of 10000."""
        rows = []
        process_log().info('Begin sync %s, path:%s, cache:%s' % (label, self.filename, csv_file))
        table = self.ds.table(name, schema)
        table.clean()
        for line in CSVSync(csv_file).fetch():
            rows.append(line)
            if len(rows) >= 10000:
                process_log().info('Sync %s of %s.' % (10000, label))
                table.insert(headers, rows)
                rows = []
        if rows:
            process_log().info('Sync %s of %s.' % (len(rows), label))
            table.insert(headers, rows)
        table.commit()

    def base_sync(self):
        """Dump base info to CSV and push it to the DS backend."""
        self._sync_rows(self.dump(base=True), self.base_name, self.base_schema, self.base_headers, 'base info')

    def data_sync(self):
        """Dump milking data to CSV and push it to the DS backend."""
        self._sync_rows(self.dump(), self.data_name, self.data_schema, self.data_headers, 'dairy')

    def __del__(self):
        # getattr: __init__ may have failed before self.fd was assigned.
        fd = getattr(self, 'fd', None)
        if fd:
            fd.close()

    def preview(self, max_line=1, base=True):
        """
            Extract the first records for previewing.
        Args:
            max_line (int): number of records to read.
            base (bool): preview base info if true, data records otherwise.

        Returns:
            list: one list of column values per record.
        """
        if base:
            record = self.base_record
            headers = self.base_headers
        else:
            record = self.data_record
            headers = self.data_headers
        return [[record(x, i) for x in headers] for i in xrange(0, max_line)]

    def sync(self):
        """
            Sync the whole GEA file (base info, then data) to DS.

            Each stage is best-effort: a failure is logged and the other
            stage still runs; finally the DS metadata is refreshed.
        """
        try:
            self.base_sync()
        except Exception as e:
            error_log().error(e)
        try:
            self.data_sync()
        except Exception as e:
            error_log().error(e)
        self.ds.update()


class DC305(object):
    """Placeholder helper for DC305 files; export is not implemented yet."""

    def __init__(self, filename):
        # Path to the DC305 source file.
        self.filename = filename

    def dump(self):
        # Stub: DC305 export has not been implemented.
        pass


class CursorFetcher(object):
    """
        Prefetch rows from a DB-API cursor on a background thread.

        A bounded queue (maxsize=2) provides back-pressure: at most two
        batches are buffered while the consumer catches up. A None item
        on the queue is the end-of-stream sentinel.
    """

    def __init__(self, cursor, row=10000):
        """
        Args:
            cursor: a DB-API cursor supporting fetchmany().
            row (int): batch size passed to each fetchmany() call.

        Raises:
            Exception: when cursor is falsy.
        """
        if not cursor:
            raise Exception('Cursor is None for CursorFetcher.')
        self.cursor = cursor
        self.row = row
        self.done = False
        self.queue = Queue(maxsize=2)
        self._fetcher = threading.Thread(target=self._fetch_handler, name='CursorFetcher')
        # daemon (setDaemon is deprecated): don't block interpreter exit.
        self._fetcher.daemon = True
        self._fetcher.start()

    def _fetch_handler(self):
        """Producer: pull batches until the cursor is exhausted or fails,
        then enqueue the None sentinel."""
        try:
            rows = self.cursor.fetchmany(self.row)
            while rows:
                self.queue.put(rows)
                rows = self.cursor.fetchmany(self.row)
        except Exception as e:
            error_log().error('Fetch data failed, reason:%s' % e)
        finally:
            # Always signal completion, even after an error.
            self.queue.put(None)

    def fetch(self):
        """Block for the next batch; returns None once exhausted."""
        try:
            return self.queue.get()
        except Empty:
            # Unreachable with a blocking get(); kept defensively.
            return None


if __name__ == '__main__':
    # Manual smoke test: parse a sample GEA file and dump it to the CSV cache.
    sample_file = '/Users/haizhi/Desktop/hz/DpDairy.dat'
    GEA(sample_file).dump()
