# coding:utf-8

"""
CopyRight(c)  2017-09-15  Fraplus Corp

Author: Weilin Shen, Xiaodong.yang
Email:  xiaodong.yang@unidt.com
Functions:

we chat server

dependence package:

fralog
handler

"""
import getopt
import json
import platform
import pprint
import sys
import time

# import down
import pandas as pd
import pymongo
import requests

from fraMail.framail import FraMail
from fralog.FraLog import Fralog
from globalsettings import CUR_MODE

if CUR_MODE == 'dev':
    from globalsettings_dev import *
else:
    from globalsettings_prod import *

#
# Whether we are running in test mode (selects the `test` database in
# _get_db_client instead of MONGO_DB_NAME).
#
TEST = False
#
# Lazily-created, module-wide MongoDB connection (see get_db_client):
# the first call creates it, later calls reuse the cached handles.
#
database = None  # pymongo Database handle, set by get_db_client
dbClient = None  # pymongo MongoClient, set by get_db_client
timelast = None  # time.time() of when the connection was created

log = Fralog().getFileLog()

# collection name -> collection handle, populated by _get_db_client
collect_map = {}
# down = DownHead()

# Pick a mongod port by OS (historical local setups).
if platform.system() == 'Windows':
    port = 27017
elif platform.system() == 'Linux':
    port = 27137
else:
    port = 27017

#
# We now connect directly to the remote database, so the port is fixed and
# no longer needs to depend on the operating system.
#
# port = 27137

# NOTE: these overwrite the OS-specific values above, so the platform check
# is effectively dead code; the settings module always wins.
host = MONGO_HOST
# host = '120.132.101.30'
port = MONGO_PORT

# WeChat numeric sex code -> human-readable label
sex = {}
sex['1'] = '男'
sex['2'] = '女'

# Channels whose responses must be excluded from every export
# (see _is_channel_valid).
exclude_channel = {}
exclude_channel['test'] = True
exclude_channel['test-1'] = True
exclude_channel['common-test'] = True

# question_id -> question document, populated by load_questions()
questions_cache = {}


def _is_channel_valid(channel):
    '''
    判断给定的channel是否应该排除
    :param channel: 
    :return: 
    '''
    if channel is None or channel == 'null':
        return False
    if exclude_channel.get(channel) is None:
        return True
    else:
        return not exclude_channel[channel]


def _get_db_client():
    """
    Create a fresh MongoDB connection, cache the commonly used collection
    handles in the module-level ``collect_map`` and ensure the indexes used
    by the export queries exist.

    :return: tuple ``(db, client)`` -- the database handle and the client

    Fixes over the original: the triple-quoted string was placed AFTER the
    ``global`` statement, making it a dead expression instead of the
    function docstring; the ``global host, port, TEST`` declaration was
    unnecessary since none of them is assigned here (reading module globals
    needs no declaration).
    """
    # Authenticate only when a password is configured.
    if MONGO_DB_PWD is None or MONGO_DB_PWD == '':
        client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT, socketKeepAlive=True)
    else:
        client = pymongo.MongoClient(host=MONGO_HOST, port=MONGO_PORT, username=MONGO_DB_USERNAME,
                                     password=MONGO_DB_PWD, socketKeepAlive=True)
    # log.info('host = %s'%(host))
    #
    # Test runs and production runs must use different databases.
    #
    if TEST:
        db = client.test
    else:
        db = client[MONGO_DB_NAME]

    collect_map['questions_collection'] = db.questions_collection
    collect_map['surveys_collection'] = db.surveys_collection
    collect_map['responses_collection'] = db.responses_collection
    collect_map['company_collection'] = db.company_collection
    collect_map['products_collection'] = db.products_collection
    db.responses_collection.create_index([('code', pymongo.ASCENDING), ('response.wxid', pymongo.ASCENDING)])
    db.products_collection.create_index([('wxuser.openid', pymongo.ASCENDING)])
    db.user_collection.create_index([('openid', pymongo.ASCENDING)])
    # db.wxuser.create_index([('openid', pymongo.ASCENDING)])
    return (db, client)


def load_questions(db):
    """Populate the module-level questions_cache with every question
    document, keyed by its question_id."""
    for doc in db.questions_collection.find():
        questions_cache[doc.get('question_id')] = doc


def get_db_client():
    """
    Return the shared (database, client) pair, creating it on first use.

    Subsequent calls always reuse the cached connection.
    """
    global timelast, database, dbClient
    if timelast is None:
        log.info('Yest')
        database, dbClient = _get_db_client()
        timelast = time.time()
    # Either just created or already cached -- hand out the same pair.
    return (database, dbClient)


def clear_db(db):
    """
    Remove historical data from mongodb.

    Note: responses_collection is deliberately left untouched.

    :param db: handle to the mongodb database
    :return: None
    """
    # db.responses_collection.remove()  -- intentionally NOT cleared
    for name in ('questions_collection', 'surveys_collection', 'company_collection'):
        getattr(db, name).remove()


def import_data_from_xls(db, filename):
    """
    Load survey definition data from an excel workbook into mongodb.

    Sheet 0 -> questions_collection, sheet 1 -> surveys_collection,
    sheet 2 -> desc_collection (dimension score descriptions),
    sheet 4 -> company_collection (company codes).

    :param db: mongodb database handle
    :param filename: path of the .xlsx workbook
    :return: None
    """
    def _records(sheet_index):
        # Round-trip through JSON so every record becomes a plain dict.
        frame = pd.read_excel(filename, sheetname=sheet_index)
        return json.loads(frame.to_json(orient='records'))

    # Read all sheets up front (as the original did), then insert.
    questions, surveys, descs, companies = (_records(i) for i in (0, 1, 2, 4))

    for rec in questions:
        db.questions_collection.insert(rec)  # question bank

    for rec in surveys:
        db.surveys_collection.insert(rec)  # survey templates

    # dimension score descriptions
    for rec in descs:
        db.desc_collection.insert(rec)

    # company code information
    for rec in companies:
        db.company_collection.insert(rec)


def check_data(db):
    """Sanity check: fetch the overall ('o') score description and dump it."""
    pprint.pprint(db.desc_collection.find_one({"id": "o"}))


def check_collection(collection):
    """
    Dump every document inside the collection to the log.

    :param collection: a mongodb collection handle
    :return: None
    """
    for doc in collection.find():
        log.info("%s" % doc)


def print_resp_log(db):
    """
    Log every response record, then log the distinct wechat ids
    (deduplicated count).

    :param db: mongodb database handle
    :return: None
    """
    total = 0
    for doc in db.responses_collection.find():
        resp = doc['response']
        wxid = resp['wxid']
        # ids shorter than 5 chars are treated as invalid accounts
        if wxid is None or len(wxid) < 5:
            log.info('无效账号')
            continue
        # if doc['response']['wechatFrom'] == 'o4WARv_mRKZ089DWLijr7qppfKR4':
        log.info('code: %s,  blog: %s,  wechat_id: %s,  ShareFrom: %s' % (doc['code'], resp['blog'],
                                                                          resp['wxid'],
                                                                          resp['wechatFrom']))
        total += 1
    log.info('Total: %d' % total)
    log.info("开始去重统计........")
    total = 0
    for wxid in db.responses_collection.distinct("response.wxid"):
        if wxid is None or len(wxid) < 5:
            log.error('无效的账号')
            continue
        total += 1
        log.info('%s' % wxid)
    log.info("目前微信数： %d" % total)


def print_wechat(db):
    """
    Log basic profile info for every successfully-fetched wechat user.

    Deprecated (see the warning below); kept for the CLI in main().

    :param db: mongodb database handle
    :return: None
    """
    log.warn('该函数已过期，请最好停止使用')
    rows = db.products_collection.find({"wxuser.err": 0})
    try:
        log.info('Total user: %d' % rows.count())
        for doc in rows:
            info = doc['wxuser']['data']
            log.info('nick name:%s, Province: %s , city: %s -- openid: %s' % (
                info['nickname'], info['province'], info['city'],
                info['openid']))
            data = info
            #
            # queue a job to download the user's avatar
            # down.addJob(data) TODO  depends on the `down` package

    except Exception:
        log.info('Error')


# def download_all_head(db):
#     """
#     下载所有用户头像
#     :param db:
#     :return:
#     """
#     rows = db.wxuser.find()
#     for row in rows:
#         down.addJob(row)
#     log.info('用户头像下载任务添加完毕，等待下载完成')

def _get_channel(db, channel):
    """
    Fetch all responses submitted through the given channel and log
    the match count.

    :param db: mongodb database handle
    :param channel: channel name
    :return: pymongo cursor over the matching responses
    """
    log.info('渠道名： %s' % channel)
    cursor = db.responses_collection.find({"response.channel": channel})
    log.info('当前记录条数: %d' % cursor.count())
    return cursor


def print_channel(db, channel):
    """
    Print every response of the given channel to stdout.

    :param db: mongodb database handle
    :param channel: channel name
    :return: None
    """
    for doc in _get_channel(db, channel):
        print(doc)


def export_wechat(db):
    """Export each distinct wechat user profile (first occurrence wins,
    deduplicated by openid) to wx.csv."""
    rows = db.products_collection.find({"wxuser.err": 0})
    seen = {}
    try:
        records = []
        for doc in rows:
            data = doc['wxuser']['data']
            key = data['openid']
            if key in seen:
                continue
            seen[key] = ''
            records.append(data)
        pd.DataFrame(records).to_csv('wx.csv')
    except Exception as e:
        print(e)


def get_user(openid):
    """
    Look up a wechat user's profile via the remote wechat service.

    :param openid: wechat openid of the user
    :return: decoded JSON response as a python dict
    """
    payload = {'openid': openid}
    reply = requests.post(url='http://wx.zhishixia.com/wechat/getuser', data=payload)
    return json.loads(reply.text)


def _export_user(db, code='zhongjin', channel='zhongjin-online'):
    rows = db.responses_collection.find({"response.channel": channel})
    if rows is None:
        return
    rows = rows.distinct("response.wechatFrom")
    user = []
    for row in rows:
        us = {}
        us['wxid'] = row
        user.append(us)
    df = pd.DataFrame(user)
    df.to_csv('wechatFrom.csv')


def _export_all_user(db):
    rows = db.wxuser.find()
    rts = []
    for row in rows:
        rec = {}
        rec['openid'] = row['openid']
        rec['nickname'] = row['nickname']
        rec['country'] = row['country']
        rec['province'] = row['province']
        rec['city'] = row['city']
        rec['face'] = row['headimgurl']
        rec['sex'] = row['sex']
        rts.append(rec)
    df = pd.DataFrame(rts)
    df.to_csv('wechat.csv')


def export_resp(db, code=None, channel=None, all=False, filename='resp.xlsx'):
    """
    Export users' questionnaire answers to an excel workbook.

    :param db: mongodb database handle
    :param code: survey code; its question-id list is read from surveys_collection
    :param channel: response channel to filter on
    :param all: when True, export every response (code is still used for the
                question list lookup)
    :param filename: output .xlsx file name
    :return: 0 on completion

    NOTE(review): when all=False and both *code* and *channel* are None,
    ``rows`` is never assigned and the loop below raises UnboundLocalError --
    callers must supply at least one of them.  Also, the survey lookup by
    *code* runs unconditionally, so it presumably fails when code=None and no
    such survey exists -- confirm against callers.
    """
    #
    # 30-question version of the questionnaire
    # questions = [101,102,103,104,105,106,107,108,109,110,111,112,113,114,115,116,117,118,119,120,121,122,123,124,125,126,127,128,129,130,45,46]
    #
    # 28-question version: the question-id list is stored on the survey
    # document as a comma-separated string.
    quest_rows = db['surveys_collection'].find_one({'code': code})['questions']
    # questions = [571,576,575,572,1117,814,306,307,318,325,326,410,
    #              552,553,555,556,557,558,559,561,562,563,565,568,
    #              569,201,202,203,204,205,206,207,208,209,210,211,
    #              212,213,214,215,216,217,218,219,220,221,222,223,
    #              224,225,226,227,228]
    quest_rows = quest_rows.split(',')
    questions = []
    for q in quest_rows:
        questions.append(int(q))
    print(questions)
    load_questions(db)

    if not all:
        if code is None and channel is not None:
            rows = db.responses_collection.find({"response.channel": channel}).batch_size(10)
        # elif code is not None and channel is None:
        #     rows = db.responses_collection.find({'code': code})
        # elif code is not None and channel is not None:
        #     rows = db.responses_collection.find({'code': code, 'response.channel': channel})
        elif code is not None:
            # rows = db.responses_collection.find({})
            rows = db.responses_collection.find({'code': code}).batch_size(10)
    else:
        rows = db.responses_collection.find().batch_size(10)
    dts = []
    for row in rows:
        try:
            resp = row['response']
            wxid = resp['wxid']
            log.info('微信ID：%s' % wxid)
            # skip responses from excluded/test channels
            if not _is_channel_valid(resp['channel']):
                continue
            # if wxid is None or len(wxid) < 5:
            #    continue
            tp = {}
            tp['start'] = resp['start']
            tp['end'] = resp['end']
            # tp['姓名'] = resp['571']['answer']
            # tp['blog'] = resp['blog']
            #
            # one column per question: "answer:time_spent"
            for elem in questions:
                try:
                    quest = questions_cache.get(elem)
                    title = quest.get('question_title')
                    ans = None
                    # ans = quest.get( resp[str(elem)]['answer'] )
                    # time_cost = quest.get(resp[str(elem)]['time'])
                    if ans is None:
                        # answers are keyed by the question id as a string
                        if resp.get(str(elem)) is not None:
                            ans = resp[str(elem)]['answer']
                            time_cost = resp[str(elem)]['time']
                            tp[title] = str(ans) + ':' + str(time_cost)
                        else:
                            tp[title] = ''
                except Exception as e:
                    # unanswered/missing question: emit an empty cell
                    log.error(e)
                    print(elem)
                    tp[str(elem)] = ''

            # job number
            # tp['AccountNo'] = resp['300']['answer']
            nickname = get_nickname(db, resp['wxid'])
            if nickname is None:
                nickname = resp['wxid']
            # tp['昵称'] = nickname
            # tp['openid'] = resp['wxid']
            tp['问卷代码'] = row['code']
            tp['渠道'] = resp['channel']
            # tp['wechatFrom'] = resp['wechatFrom']
            # big-five (OCEAN) scores, when already computed on the record
            if row.get('zocean') is not None:
                tp['z0-O'] = row['zocean']['O']
                tp['z1-C'] = row['zocean']['C']
                tp['z2-E'] = row['zocean']['E']
                tp['z3-A'] = row['zocean']['A']
                tp['z4-N'] = row['zocean']['N']

            # ==========df_ocean===========
            # if row.get('df_ocean') is not None:
            #     tp['df-O'] = row['df_ocean']['O']
            #     tp['df-C'] = row['df_ocean']['C']
            #     tp['df-E'] = row['df_ocean']['E']
            #     tp['df-A'] = row['df_ocean']['A']
            #     tp['df-N'] = row['df_ocean']['N']
            # ==========wx_ocean===========
            # if row.get('wx_ocean') is not None:
            #     tp['wx-O'] = row['wx_ocean']['O']
            #     tp['wx-C'] = row['wx_ocean']['C']
            #     tp['wx-E'] = row['wx_ocean']['E']
            #     tp['wx-A'] = row['wx_ocean']['A']
            #     tp['wx-N'] = row['wx_ocean']['N']
            # ==========get_score===========
            # if row.get('get_score') is not None:
            #     tp['get-O'] = row['get_score']['O']
            #     tp['get-C'] = row['get_score']['C']
            #     tp['get-E'] = row['get_score']['E']
            #     tp['get-A'] = row['df_ocean']['A']
            #     tp['get-N'] = row['get_score']['N']
            # #
            # # fetch the user's jewellery-image preferences
            # hs = db.products_collection.find_one({'wxuser.data.openid': wxid})
            #
            # if hs['nowscore'] is not None:
            #     score = hs['nowscore']
            #     for sc in score:  # each element is an object
            #         for k in sc:
            #             tp[k] = sc[k]
            #
            # #
            # # fetch the user's profile
            # usr = get_user(wxid)
            # if usr is None or usr['err'] != 0:
            #     tp['nickname'] = ''
            #     tp['province'] = ''
            #     tp['city'] = ''
            #     tp['sex'] = ''
            #     #continue
            # else:
            #     usr = usr['data']
            #     tp['nickname'] = usr['nickname']
            #     tp['province'] = usr['province']
            #     tp['city'] = usr['city']
            #     tp['sex'] = usr['sex']
            dts.append(tp)
            # print('成功处理: %s -- %s'%(wxid, tp['nickname']))
        except Exception as e:
            # a malformed record skips only itself
            # log.error(e)
            print(e)
            continue
    df = pd.DataFrame(dts)
    df.to_excel(filename, sheet_name='用户问卷')
    log.info('数据导出完毕')
    return 0


def export_questions(db, questions=None, filename='resp.xlsx'):
    """
    Export a subset of the question bank to an excel file.

    :param db: mongodb database handle
    :param questions: '|'-separated string of question ids to keep;
                      None/empty exports nothing
    :param filename: output .xlsx path
    :return: 0 on completion
    """
    # The wanted-id list is loop-invariant: compute it once instead of
    # re-splitting the string for every row.  Guarding against None also
    # fixes the default call crashing (and printing an AttributeError)
    # once per row.
    ques = questions.split('|') if questions else []
    # NOTE(review): question_id values coming from mongodb may be ints while
    # split() yields strings -- confirm the stored type, otherwise the
    # membership test below never matches.
    rows = db.questions_collection.find().batch_size(10)
    dts = []
    for row in rows:
        try:
            tp = {}
            tp['题目'] = row['question_title']
            tp['题号'] = row['question_id']
            tp['题型'] = row['type']
            tp['属性'] = row['question_type']
            tp['特征库编码'] = row['rule_id']
            if row['question_id'] in ques:
                dts.append(tp)
        except Exception as e:
            # a malformed question document skips only itself
            print(e)
            continue
    df = pd.DataFrame(dts)
    df.to_excel(filename, sheet_name='问题库内容')
    log.info('数据导出完毕')
    return 0


def surveys_resp(db, filename):
    """
    Export all template questionnaires (default rule mappings) to excel.

    :param db: mongodb database handle
    :param filename: output .xlsx path
    :return: 0 on completion
    """
    cursor = db.t_selectrule_map.find({'company_id': 'default_all'}).batch_size(10)
    records = []
    for doc in cursor:
        try:
            records.append({
                '序号': doc.get('number'),
                '规则名称': doc.get('rule_name'),
                '描述': doc.get('description'),
                '规则id': doc.get('rule_id'),
                # '公司名称': doc.get('company_id')
                # '所属问卷类型select_survey_type': doc.get('select_survey_type')
            })
        except Exception as e:
            print(e)
            continue
    pd.DataFrame(records).to_excel(filename, sheet_name='模板问卷')
    log.info('数据导出完毕')
    return 0


def export_resp_with_user(db, nickname=None, wxid=None, all=False):
    """
    Export one user's response data, located by wechat nickname or openid.

    :param db: mongodb database handle
    :param nickname: wechat nickname (optional)
    :param wxid: wechat openid (optional)
    :param all: when True, delegate to export_resp(db, all=True)
    :return: 0 on success, -1 when the user cannot be resolved
    """
    if nickname is None and wxid is None:
        log.error('昵称和微信号必须提供一个')
        return -1

    if nickname is not None:
        # A nickname was supplied: resolve it to an openid first.
        user = db.wxuser.find_one({"nickname": nickname})
        if user is None:
            log.error("抱歉，我们无法从数据库中查找到您提供的用户信息，请您确认输入是否正确")
            return -1
        wxid = user['openid']
    else:
        # Only the openid was supplied: fetch the profile to get a nickname.
        user = db.wxuser.find_one({"openid": wxid})
        if user is None:
            log.error('数据库中没有改用户的数据，请检查您提供的输入是否正确')
            return -1
        nickname = user['nickname']

    if all is True:  # export every user's data instead
        return export_resp(db=db, all=True)

    # Finally filter the response records by openid.
    records = []
    for doc in db.responses_collection.find({"response.wxid": wxid}):
        scores = doc['bigFive']
        records.append({
            'nickname': nickname,
            'openid': wxid,
            'z-O': scores['O'],
            'z-C': scores['C'],
            'z-E': scores['E'],
            'z-A': scores['A'],
            'z-N': scores['N'],
            '开始时间': doc['response']['start'],
            '结束时间': doc['response']['end'],
        })
    pd.DataFrame(records).to_csv('%s.csv' % nickname)
    log.info('该用户的数据已经导出')
    return 0


def get_nickname(db, openid):
    """
    Look up a user's wechat nickname by openid.

    :param db: mongodb database handle
    :param openid: wechat openid
    :return: the nickname; the openid itself when the record lacks one;
             None when no user record exists at all
    """
    doc = db.wxuser.find_one({'openid': openid})
    if doc is None:
        return None
    try:
        return doc['nickname']
    except Exception:
        # Record exists but has no nickname field: fall back to the openid.
        return openid


def check_job_num(db, code='v28', channel='test'):
    """
    Collect bigFive scores of responses whose job number is '123456'
    and dump them to 12345.csv.

    :param db: mongodb database handle
    :param code: survey code (unused, kept for the callers)
    :param channel: channel to scan
    :return: None
    """
    matched = []
    for doc in db.responses_collection.find({"response.channel": channel}):
        try:
            if doc['response']['jobnum'] != '123456':
                continue
            scores = doc['bigFive']
            matched.append({'z-O': scores['O'], 'z-C': scores['C'],
                            'z-E': scores['E'], 'z-A': scores['A'],
                            'z-N': scores['N']})
        except Exception as e:
            # malformed records are reported and skipped
            print(e)
    pd.DataFrame(matched).to_csv('12345.csv')


def main():
    """
    Command-line entry point.

    -c <collection>  dump a collection registered in collect_map
    -p resp          print users' answers
    -p wechat        print users' wechat info
    """
    # BUG FIX: get_db_client() returns a (db, client) tuple; the original
    # bound the whole tuple to `db`, so print_resp_log/print_wechat received
    # a tuple instead of the database handle (compare the __main__ block,
    # which unpacks it).
    db, client = get_db_client()
    opts, args = getopt.getopt(sys.argv[1:], "hc:p:")
    for opt, value in opts:
        if opt == '-c':
            check_collection(collect_map[value])
        elif opt == '-p':
            if value == 'resp':
                print_resp_log(db)
            elif value == 'wechat':
                print_wechat(db)
        else:
            print('python UploadSurvey.py -c products_collection')
            print('python UploadSurvey.py -p resp    打印用户答案')
            print('python UploadSurvey.py -p wechat  打印用户信息')


def _monitor(db, channel, title=''):
    """
    Poll one channel forever (hourly) and mail out a notice whenever the
    number of completed questionnaires changes.

    :param db: mongodb database handle
    :param channel: channel to watch
    :param title: company name used in the mail subject/body
    :return: never returns (infinite loop)
    """
    previous = 0
    while True:
        current = _get_channel(db, channel).count()
        if current != previous:
            previous = current
            body = '<h1>发现自己</h1>'
            body += '<h3><b>公司名称：</b><span>%s</span></h3>' % title
            body += '<h3>用户完成问卷次数：%s</h3>' % current
            try:
                mail = FraMail()
                mail.login()
                for rcpt in ('xiaodong.yang@unidt.com', 'xuan.feng@unidt.com',
                             'lz@lcsw.cn', 'zouxianduo@lcsw.cn'):
                    mail.send(rcpt, body, title)
            except Exception:
                # Best-effort notification: ignore mail failures, keep polling.
                pass
        time.sleep(3600)


def export_data(db, filter):
    """
    Dump the raw response documents matching *filter* to data.csv.

    :param db: mongodb database handle
    :param filter: mongodb query document (parameter name kept for callers,
                   although it shadows the builtin)
    :return: None
    """
    docs = list(db.responses_collection.find(filter))
    pd.DataFrame(docs).to_csv('data.csv')


if __name__ == '__main__':
    # Ad-hoc driver: the actual task is chosen by (un)commenting one of the
    # calls below; by default only the connection setup runs.
    (db, client) = get_db_client()
    # export_resp(db,code='CICCBB73', channel='CICCBB73_channel', filename='CICC.xlsx')
    # export_resp(db, code='hr', filename='hr.xlsx')
    # _monitor(db, channel='lichu-v2', title='利楚')
    # export_resp(db, code='fill', channel='aixinnuo')
    # download_all_head(db)
    # _export_all_user(db)
    # _export_user(db, code='v28', channel='lichu-test')
    # _export_all_user(db)
    # export_resp(db, code='v28',channel = 'test-1', all=True)
    # _monitor(db, channel='lichu-test', title = '利楚')
    # _monitor(db, channel='fendu', title='分度')
    # check_job_num(db)
    # clear_db(db)

    # data = {}
    # data['channel'] = '123'
    # row = db.wxuser.find_one({"openid": 'oG-Lc1NEYgMb6sgKVIZ3oUv9taYQ'})
    # db.wxuser.update({"openid": 'oG-Lc1NEYgMb6sgKVIZ3oUv9taYQ'}, {"$set" : data})
    # print(row)
    # export_resp(db,code='hr', channel='test')
    # export_resp_with_user(db, nickname='qinyun')
    # import_data_from_xls(db, 'survey2.xlsx')
    # client.close()
    # export_resp(db)
    # export_wechat(db)
    # print_wechat(db)
    # main()
    # db = get_db_client()
    # clear_db(db)
    # import_data_from_xls(db,'survey.xlsx')
    # log.info('导入数据完毕')

    # print_wechat(db)
    # check_collection(db.products_collection)
    # print_resp_log(db)
    #
    # df = pd.read_excel('fill.xlsx', sheetname=0)
    # surveys_json = df.to_json(orient='records')
    #
    # surveys_json = json.loads(surveys_json)
    # db.surveys_collection.insert(surveys_json)
