#!/usr/bin/env python
# -*- coding: utf-8 -*-

import sys
import os
import psycopg2
import csv
import traceback
from datetime import datetime
from scpy.logger import get_logger

# Project root (the parent of this file's directory); used to locate the DB config.
ROOT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
# NOTE(review): mid-module import kept where the original placed it.
from company_crawler.company_crawler_util import get_db_config

db_config = get_db_config(ROOT_PATH)
logger = get_logger(__file__)

# Module-level connection and cursor shared by every helper below;
# released by close() at the end of the __main__ run.
conn = psycopg2.connect(database=db_config['db'], host=db_config['host'], port=db_config['port'],
                        user=db_config['user'], password=db_config['pw'])
cur = conn.cursor()


def get_many(sql, param=()):
    """Execute *sql* with *param* on the shared cursor and fetch all rows.

    :param sql: SQL text containing %s placeholders
    :param param: tuple of bind parameters
    :return: list of row tuples, or None when the query failed
    """
    try:
        cur.execute(sql, param)
        return cur.fetchall()
    except Exception as e:
        logger.exception(e)
        # psycopg2 leaves the transaction aborted after an error; roll back
        # so later statements on this shared connection can still run.
        conn.rollback()
        return None

# ----------- Fetch the records of stocks operated on within a date range from stock_operate -----------
def get_header(which_header):
    """Return the column list for the requested header flavour.

    :param which_header: 'csv' for the full export header,
                         'select_stock' for the columns queried from stock_operate
    :return: list of column names, or None for an unknown flavour
    """
    headers = {
        'csv': ['stock_id', 'stock_code', 'name_old', 'name_new', 'reason',
                'update_time', 'pinyin', 'sywg_industry', 'zx_industry'],
        'select_stock': ['stock_id', 'stock_code', 'name_old', 'name_new',
                         'reason', 'update_time'],
    }
    return headers.get(which_header)


def select_stock(start_date, end_date, csv_path=None):
    """Export operated-stock records for [start_date, end_date) to a CSV file.

    :param start_date: inclusive lower bound on update_time
    :param end_date: exclusive upper bound on update_time
    :param csv_path: output path; defaults to operate_stock.csv next to this file
    """
    if csv_path is None:
        csv_path = os.path.join(os.path.dirname(__file__), 'operate_stock.csv')

    # Half-open interval, e.g. [2016-09-09, 2016-09-13)
    result = get_operate_stock(start_date, end_date)
    header = get_header('csv')

    # One open in write mode instead of the old write-header-then-reopen-append dance.
    with open(csv_path, 'wb') as csvfile:
        csv_writer = csv.writer(csvfile, dialect='excel')
        csv_writer.writerow(header)
        for single in result or []:  # get_operate_stock returns None on DB error
            single = list(single)
            if len(single) < len(header):
                # Pad missing trailing columns so every row matches the header width.
                single.extend([None] * (len(header) - len(single)))
            csv_writer.writerow(single)


def get_operate_stock(start_date, end_date):
    """Query stock_operate for rows whose update_time lies in [start_date, end_date).

    :return: list of row tuples (columns per get_header('select_stock')), or None on error
    """
    columns = get_header('select_stock')
    query = "select {} from stock_operate where update_time>=%s AND update_time<%s;".format(
        ','.join(columns))
    return get_many(query, (start_date, end_date))


# ----------- Insert a new record into the DB, or update an existing one, from the CSV data -----------

def csv_to_db(method, table_name, field_name_and_is_mapping_dict, csv_path=None):
    """Apply *method* (insert or update) to each data row of a CSV file.

    Requirement: when method=update, at most one row per stock_id may exist in the table.
    :param method: callable taking (field_dict, table_name) -- e.g. insert or update
    :param table_name: target DB table
    :param field_name_and_is_mapping_dict: {field_name: is_mapping}; field_name must match
           the table column; is_mapping marks fields resolved through a mapping table
    :param csv_path: CSV file path; defaults to operate_stock.csv next to this file
    :return: None
    """
    if csv_path is None:
        csv_path = os.path.join(os.path.dirname(__file__), 'operate_stock.csv')

    with open(csv_path, 'r') as csvfile:
        reader = csv.reader(csvfile)
        header = next(reader)

        index_by_field = get_field_index(field_name_and_is_mapping_dict.keys(), header)
        for row_no, row in enumerate(reader):
            field_dict = build_field_dict(row, index_by_field, field_name_and_is_mapping_dict)

            # Resolve stock_id: prefer an explicit column, otherwise derive it
            # from the stock_code column.
            if 'stock_id' in header:
                field_dict['stock_id'] = row[header.index('stock_id')]
            elif 'stock_code' in header:
                code = row[header.index('stock_code')][:6]  # first six chars are the stock code
                field_dict['stock_id'] = get_stock_id(code)

            if not field_dict.get('stock_id'):
                logger.warning('No Stock_ID: the %d row' % row_no)
                continue

            method(field_dict, table_name)

            print('Done total:{} - stock_id: {}'.format(row_no, field_dict.get('stock_id')))


def get_stock_id(stock_code):
    """Look up the primary-key id of a stock by its six-digit code.

    :param stock_code: stock code string (e.g. '600000')
    :return: stock id, or None when the code is unknown
    """
    sql = 'SELECT id from stock where stock_code=%s'
    cur.execute(sql, (stock_code,))
    row = cur.fetchone()
    # fetchone() returns None on a miss; the original fetchone()[0] raised
    # TypeError here. Callers already treat a falsy stock_id as "skip row".
    return row[0] if row else None



def get_field_index(field_list, header):
    """Map each requested field name to its column position in the CSV header.

    :param field_list: field names to look up
    :param header: first row of the CSV (sequence of column names)
    :return: dict {field_name: index}; fields absent from header are logged and skipped
    """
    index_dict = {}
    for name in field_list:
        if name in header:
            index_dict[name] = header.index(name)
        else:
            logger.info('No field--[%s] in csv' % (name,))
    return index_dict


def build_field_dict(row, field_index_dict, field_name_and_is_mapping_dict):
    """Build a {column_name: value} dict from one CSV row.

    Fields flagged as mapping fields are stored under '<name>_id' with the id
    resolved through the 'company_mapping_<name>' table; plain fields keep
    their raw CSV value.
    :param row: one data row from the CSV
    :param field_index_dict: {field_name: column index in row}
    :param field_name_and_is_mapping_dict: {field_name: is_mapping}
    :return: dict ready to be passed to insert()/update()
    """
    field_dict = {}
    for name, col in field_index_dict.items():
        raw_value = row[col]
        if field_name_and_is_mapping_dict.get(name):
            # Mapped field: store the id resolved via the mapping table.
            mapping_table = 'company_mapping_{}'.format(name)
            field_dict['%s_id' % (name,)] = get_id_from_mapping_db(mapping_table, raw_value)
        else:
            field_dict[name] = raw_value
    return field_dict


def get_id_from_mapping_db(table, value, display=u''):
    """Return the id of the row in *table* whose name == *value*.

    If no such row exists, insert one (display_name defaults to *value*) and
    return the new id.
    :param table: mapping table name (interpolated into SQL -- trusted, internal names only)
    :param value: name to look up
    :param display: display_name for a newly inserted row; falls back to *value*
    :return: the id, or None when *value* is falsy or the DB operation failed
    """
    if not value:
        return None

    try:
        cur.execute('SELECT id FROM %s WHERE name=%%s' % (table,), (value,))
        row = cur.fetchone()
        # BUG FIX: the original did fetchone()[0], which raised TypeError on a
        # miss -- so the INSERT branch below was unreachable and unknown values
        # always came back as None.
        if row:
            return row[0]

        sql = 'INSERT INTO %s (name, display_name) VALUES (%%s, %%s) RETURNING id' % (table,)
        cur.execute(sql, (value, display or value))
        conn.commit()
        return cur.fetchone()[0]
    except Exception:
        logger.exception(traceback.format_exc())
        conn.rollback()  # was commit(); an aborted transaction must be rolled back
        return None


def update(field_dict, table_name):
    """Update the row id == field_dict['stock_id'] in *table_name*, one column at a time.

    :param field_dict: {'stock_id': id, column: value, ...}; not mutated
    :param table_name: target table (interpolated into SQL -- trusted, internal names only)
    """
    # Shallow-copy so popping stock_id does not mutate the caller's dict
    # (replaces the per-call `import copy` of the original).
    fields = dict(field_dict)
    stock_id = fields.pop('stock_id')

    for column, value in fields.items():
        sql = 'UPDATE %s SET %s=%s WHERE id=%s' % (table_name, column, '%s', '%s')
        try:
            cur.execute(sql, (value, stock_id))
        except Exception as e:
            logger.exception(e)
            # The original committed in `finally`, but committing an aborted
            # psycopg2 transaction fails; roll back instead.
            conn.rollback()
        else:
            conn.commit()


def insert(field_dict, table_name):
    """Insert *field_dict* as a new row of *table_name*, skipping exact duplicates.

    Side effect: stamps field_dict['update_time'] with the current datetime
    (kept from the original implementation).
    :param field_dict: {column: value}
    :param table_name: target table (interpolated into SQL -- trusted, internal names only)
    """
    if check_duplicate(field_dict, table_name):
        print('Non Insert: Duplicated')
        return

    field_dict['update_time'] = datetime.today()
    # Freeze the column order once so names and values line up deterministically.
    columns = list(field_dict.keys())
    sql = 'INSERT INTO %s (%s) VALUES (%s)' % (
        table_name,
        ','.join(columns),
        ','.join(['%s'] * len(columns)),
    )
    try:
        cur.execute(sql, [field_dict[c] for c in columns])
    except Exception as e:
        logger.exception(e)
        # The original committed in `finally`; an aborted transaction needs rollback.
        conn.rollback()
    else:
        conn.commit()


def check_duplicate(field_dict, table_name):
    """Return True when *table_name* already holds a row matching every field.

    NOTE(review): None values render as `col=%s` with NULL, which never matches
    in SQL -- rows containing NULLs are therefore never reported as duplicates;
    confirm this is intended.
    :param field_dict: {column: value} conditions, ANDed together
    :param table_name: table to query (trusted, internal name)
    """
    condition = ' AND '.join('{}=%s'.format(col) for col in field_dict.keys())
    sql = 'SELECT count(*) from %s where %s' % (table_name, condition)
    cur.execute(sql, list(field_dict.values()))
    return bool(cur.fetchone()[0])


def close():
    """Release the module-level cursor and connection (cursor first)."""
    for resource in (cur, conn):
        resource.close()


if __name__ == '__main__':
    # Columns to read from each industry CSV; none go through a mapping table.
    field_name_and_is_mapping_dict = {
                                        'stock_id': False, 'level_1': False, 'level_2': False,
                                        'level_3': False, 'level_4': False, 'method': False
                                      }
    # Industry-classification CSVs expected alongside this script
    # (presumably Wind, SWS, CITIC and CICC classifications -- TODO confirm).
    csv_files = ['wd_industry.csv', 'sywg_industry.csv', 'zx_industry.csv', 'gx_industry.csv']
    for csv_file in csv_files:
        csv_path = os.path.join(os.path.dirname(__file__), csv_file)
        csv_to_db(method=insert,
                  table_name='company_industry',
                  field_name_and_is_mapping_dict=field_name_and_is_mapping_dict,
                  csv_path=csv_path)

    # Close the shared DB connection
    close()
