import re
import json
import utils
import codecs
import insert_mysql


# Flow master (traffic) table: append one record as a JSON line
def Flow_master_data_table(date_time, income, account):
    """Serialize one day's traffic-master record for *account* and append
    it to Flow_master_data_table.json (one JSON object per line)."""
    # A falsy income (0, '', None) is recorded as the placeholder ' _ '.
    Account_income = income if income else ' _ '
    info = {
        '时间': date_time,
        '主体公司': account['gs'],
        '负责人': account['charge'],
        '名称': account['nickname'],
        '微信号': account['wx_id'],
        '原始ID': account['gh_id'],
        '粉丝来源': account['fans_src'],
        '类型': account['type'],
        '性别': account['gender'],
        '账户收入': Account_income
    }
    with codecs.open('Flow_master_data_table.json', 'a', 'utf-8') as f:
        f.write(json.dumps(info) + "\r")



# Advertiser table: append one record as a JSON line
def Advertiser_data_form(data_date, account, Accumulative_attention, Service_provider, Amount_of_money):
    """Serialize one advertiser record for *account* and append it to
    Advertiser_data_form.json (one JSON object per line)."""
    record = {
        "时间": data_date,
        "负责人": account['charge'],
        "名称": account['nickname'],
        "微信号": account['wx_id'],
        "原始ID": account['gh_id'],
        "粉丝来源": account['fans_src'],
        "类型": account['type'],
        "性别": account['gender'],
        "累计关注": Accumulative_attention,
        "服务商": Service_provider,
        "金额": Amount_of_money,
        "主体公司": account['gs']
    }
    with codecs.open('Advertiser_data_form.json', 'a', 'utf-8') as out:
        out.write(json.dumps(record) + "\r")



# Violation table: append one record as a JSON line
def Violation_data_table(data_date, account, Type_of_violation, Irregularities, Violation_treatment,
                         Whether_or_not_to_ban, Accumulative_attention):
    """Serialize one violation record for *account* and append it to
    Violation_data_table.json (one JSON object per line)."""
    record = {
        "时间": data_date,
        "负责人": account['charge'],
        "名称": account['nickname'],
        "微信号": account['wx_id'],
        "原始ID": account['gh_id'],
        "粉丝来源": account['fans_src'],
        "类型": account['type'],
        "性别": account['gender'],
        "累计关注": Accumulative_attention,
        "违规类型": Type_of_violation,
        "违规内容": Irregularities,
        "违规处理": Violation_treatment,
        "是否封禁": Whether_or_not_to_ban,
        "主体公司": account['gs']
    }
    with codecs.open('Violation_data_table.json', 'a', 'utf-8') as out:
        out.write(json.dumps(record) + "\r")



# 12-hour reading comparison: append one record as a JSON line
def insert_reading_comparison_12_hour_table(diff, data_date, account, Accumulative_attention):
    """Write the 12-hour reading-comparison record for *account* to
    insert_reading_comparison_12_hour_table.json (one JSON object per line).

    :param diff: number of days to look back when selecting sent messages
    :param data_date: report date string
    :param account: account descriptor dict (gs/charge/nickname/... keys)
    :param Accumulative_attention: total follower count for the record
    """
    i = ReportGenerator.get_msg_info(data_date, account['gh_id'])
    msg_all = get_app_msg_detail_stat(i, utils.get_day_before_today(diff, data_date))
    # Flatten the first sent message into 17 fixed slots:
    # [send time, title 1, reads 1, ..., title 8, reads 8]; unused slots ''.
    # (Replaces the former locals()-mutation hack, which is CPython-specific
    # and fragile; behavior for well-formed input is unchanged.)
    headlines = [''] * 17
    try:
        for x, value in enumerate(msg_all[0][:17]):
            headlines[x] = value
    except Exception as e:
        # Best effort: malformed message data leaves the slots empty.
        print(e)

    info = {
        "时间": data_date,
        "主体公司": account['gs'],
        "负责人": account['charge'],
        "名称": account['nickname'],
        "微信号": account['wx_id'],
        "原始ID": account['gh_id'],
        "粉丝来源": account['fans_src'],
        "类型": account['type'],
        "性别": account['gender'],
        "累计关注": Accumulative_attention,
        "图文发送时间": headlines[0],
        "1条标题": headlines[1],
        "1条阅读": headlines[2],
        "2条标题": headlines[3],
        "2条阅读": headlines[4],
        "3条标题": headlines[5],
        "3条阅读": headlines[6],
        "4条标题": headlines[7],
        "4条阅读": headlines[8],
        "5条标题": headlines[9],
        "5条阅读": headlines[10],
        "6条标题": headlines[11],
        "6条阅读": headlines[12],
        "7条标题": headlines[13],
        "7条阅读": headlines[14],
        "8条标题": headlines[15],
        "8条阅读": headlines[16]
    }
    with codecs.open('insert_reading_comparison_12_hour_table.json', 'a', 'utf-8') as f:
        f.write(json.dumps(info) + "\r")



# 36-hour reading comparison: append one record as a JSON line
def insert_reading_comparison_36_hour_table(diff, data_date, account, Accumulative_attention):
    """Write the 36-hour reading-comparison record for *account* to
    insert_reading_comparison_36_hour_table.json (one JSON object per line).

    :param diff: number of days to look back when selecting sent messages
    :param data_date: report date string
    :param account: account descriptor dict (gs/charge/nickname/... keys)
    :param Accumulative_attention: total follower count for the record
    """
    i = ReportGenerator.get_msg_info(data_date, account['gh_id'])
    msg_all = get_app_msg_detail_stat(i, utils.get_day_before_today(diff, data_date))
    # Flatten the first sent message into 17 fixed slots:
    # [send time, title 1, reads 1, ..., title 8, reads 8]; unused slots ''.
    # (Replaces the former locals()-mutation hack, which is CPython-specific
    # and fragile; behavior for well-formed input is unchanged.)
    headlines = [''] * 17
    try:
        for x, value in enumerate(msg_all[0][:17]):
            headlines[x] = value
    except Exception as e:
        # Best effort: malformed message data leaves the slots empty.
        print(e)

    info = {
        "时间": data_date,
        "主体公司": account['gs'],
        "负责人": account['charge'],
        "名称": account['nickname'],
        "微信号": account['wx_id'],
        "原始ID": account['gh_id'],
        "粉丝来源": account['fans_src'],
        "类型": account['type'],
        "性别": account['gender'],
        "累计关注": Accumulative_attention,
        "图文发送时间": headlines[0],
        "1条标题": headlines[1],
        "1条阅读": headlines[2],
        "2条标题": headlines[3],
        "2条阅读": headlines[4],
        "3条标题": headlines[5],
        "3条阅读": headlines[6],
        "4条标题": headlines[7],
        "4条阅读": headlines[8],
        "5条标题": headlines[9],
        "5条阅读": headlines[10],
        "6条标题": headlines[11],
        "6条阅读": headlines[12],
        "7条标题": headlines[13],
        "7条阅读": headlines[14],
        "8条标题": headlines[15],
        "8条阅读": headlines[16]
    }
    with codecs.open('insert_reading_comparison_36_hour_table.json', 'a', 'utf-8') as f:
        f.write(json.dumps(info) + "\r")



# Data form table: derive daily metrics and append one record as a JSON line
def Data_form_table(data_date, account, Sex_rate, Accumulative_attention, Newly_added, Abolish_concern, Net_growth,
                    Headline_readings, info, income_info):
    """Compute derived reading/ad metrics for one account-day and append the
    full record to Data_form_table.json (one JSON object per line).

    :param Sex_rate: gender ratio as a fraction; formatted to 'xx.x%' when truthy
    :param info: counter list; indices match get_app_msg_stat() output
        (1 total read count, 2 dialog read users, 3 dialog read count,
        5 moments read count, 7 share count, 10 history reads) -- presumably
        produced by that helper; confirm against callers
    :param income_info: [exposure, clicks, income] as produced by
        get_publisher_stat(); entries may be the string '-' (no publisher data)
    """

    # Headline read rate = headline reads / followers; the two string
    # sentinels mean no image-text message was sent that day.
    if Headline_readings == "当天未发":
        Headline_reading_rate = 0
    elif Headline_readings == "非图文类型":
        Headline_reading_rate = 0
    else:
        Headline_reading_rate = ('{0}%'.format(round(int(Headline_readings) / int(Accumulative_attention) * 100, 2)))
    # Ad click-through metrics: guard against zero/missing exposure.
    if income_info[0] == 0 or income_info[0] == "-":
        Clicking_rate = 0
        Exposure_unit_price = 0
    else:
        Exposure_unit_price = round(int(income_info[2]) / int(income_info[0]), 4)
        Clicking_rate = ('{0}%'.format(round(int(income_info[1]) / int(income_info[0]) * 100, 2)))
    # Price per click: guard against zero/missing clicks or income.
    if income_info[1] == 0 or income_info[1] == "-" or income_info[2] == "-" or income_info[2] == 0.0:
        Click_unit_price = 0
    else:
        Click_unit_price = round(int(income_info[2]) / int(income_info[1]), 4)
    if Sex_rate:
        Sex_rate = ('{0}%'.format(round(Sex_rate * 100, 2)))
    # No publisher income data at all: zero every derived metric.
    if income_info[2] == "-":
        Vermicelli_unit_price = 0
        Reading_unit_price = 0
        Comprehensive_reading_rate = 0
        True_reading_rate = 0
        Two_transmission_ratio = 0
        Per_capita_opening_times = 0
        Forwarding_rate = 0
        The_exposure_rate = 0
        Opening_rate = 0
    else:
        # Follower-relative rates need a non-zero follower count.
        if Accumulative_attention == 0:
            Comprehensive_reading_rate = 0
            True_reading_rate = 0
            Vermicelli_unit_price = 0
            Opening_rate = 0
        else:
            Comprehensive_reading_rate = ('{0}%'.format(round(int(info[1]) / int(Accumulative_attention) * 100, 2)))
            True_reading_rate = ('{0}%'.format(
                round((int(info[3]) + int(info[5]) + int(info[7]) + int(info[10])) / int(Accumulative_attention) * 100,
                      2)))
            # NOTE(review): income_info[2] is used without int() here, unlike
            # the surrounding metrics -- confirm it is always numeric on this path.
            Vermicelli_unit_price = round(income_info[2] / Accumulative_attention, 4)
            # NOTE(review): this ratio is NOT multiplied by 100 before the
            # '%' formatting, unlike the other rates -- verify that is intended.
            Opening_rate = ('{0}%').format(round((int(info[2]) / Accumulative_attention), 2))
        # Second-spread ratio: (moments reads + shares) / (dialog + history reads).
        if info[3] + info[10] == 0:
            Two_transmission_ratio = 0
        else:
            Two_transmission_ratio = round((int(info[5]) + int(info[7])) / (int(info[3]) + int(info[10])), 4)
        if info[2] + info[10] == 0:
            Per_capita_opening_times = 0
        else:
            Per_capita_opening_times = round((int(info[3]) + int(info[10])) / (int(info[2]) + int(info[10])), 4)
        if info[3] + info[5] + info[7] + info[10] == 0:
            Forwarding_rate = 0
        else:
            Forwarding_rate = ('{0}%'.format(
                          round(int(info[7]) / (int(info[3]) + int(info[5]) + int(info[7]) + int(info[10])) * 100, 4)))
        # Exposure rate and per-read price need a non-zero total read count.
        if info[1] == 0:
            The_exposure_rate = 0
            Reading_unit_price = 0
        else:
            The_exposure_rate = ('{0}%'.format(round(int(income_info[0]) / int(info[1]) * 100, 2)))
            Reading_unit_price = round(int(income_info[2]) / int(info[1]), 4)

    iinfo = {
        "时间": data_date,
        "负责人": account['charge'],
        "名称": account['nickname'],
        "微信号": account['wx_id'],
        "原始ID": account['gh_id'],
        "粉丝来源": account['fans_src'],
        "类型": account['type'],
        "性别": account['gender'],
        "男女率": Sex_rate,
        "累计关注": Accumulative_attention,
        "新增": Newly_added,
        "取关": Abolish_concern,
        "净增": Net_growth,
        "头条阅读数": Headline_readings,
        "综合阅读人数": info[0],
        "综合阅读次数": info[1],
        "会话阅读人数": info[2],
        "会话阅读次数": info[3],
        "朋友圈阅读人数": info[4],
        "朋友圈阅读次数": info[5],
        "转发人数": info[6],
        "转发次数": info[7],
        "收藏人数": info[8],
        "收藏次数": info[9],
        "历史消息阅读次数": info[10],
        "真实阅读": info[11],
        "头条阅读率": Headline_reading_rate,
        "综合阅读率": Comprehensive_reading_rate,
        "真实阅读率": True_reading_rate,
        "二次传播比": Two_transmission_ratio,
        "人均打开次数": Per_capita_opening_times,
        "转发率": Forwarding_rate,
        "打开率": Opening_rate,
        "曝光量": income_info[0],
        "点击量": income_info[1],
        "总收入": income_info[2],
        "曝光率": The_exposure_rate,
        "点击率": Clicking_rate,
        "曝光单价": Exposure_unit_price,
        "点击单价": Click_unit_price,
        "阅读单价": Reading_unit_price,
        "粉丝单价": Vermicelli_unit_price,
        "主体公司": account['gs']

    }
    with codecs.open('Data_form_table.json','a','utf-8') as f:
        infos = json.dumps(iinfo)
        f.write(infos + "\r")


# Flow master table: load JSON lines and bulk-insert into MySQL
def json_insert_Flow_master_data_table():
    """Read every JSON line from Flow_master_data_table.json and hand the
    records to insert_mysql for bulk insertion.

    Fix: the file handle was never closed; use a with-statement.
    """
    with open('Flow_master_data_table.json', 'r', encoding='utf-8') as f:
        results = [json.loads(line) for line in f]
    insert_mysql.mysql_insert_Flow_master_data_table(results)




# Advertiser table: load JSON lines and bulk-insert into MySQL
def json_insert_Advertiser_data_form():
    """Read every JSON line from Advertiser_data_form.json and hand the
    records to insert_mysql for bulk insertion.

    Deliberately best-effort: any failure (e.g. missing file) is printed
    and swallowed, matching the original behavior.
    Fix: the file handle was never closed; use a with-statement.
    """
    try:
        with open("Advertiser_data_form.json", "r", encoding="utf-8") as f:
            results = [json.loads(line) for line in f]
        insert_mysql.mysql_insert_Advertiser_data_table(results)
    except Exception as e:
        print(e)


# Violation table: load JSON lines and bulk-insert into MySQL
def json_insert_Violation_data_table():
    """Read every JSON line from Violation_data_table.json and hand the
    records to insert_mysql for bulk insertion.

    Fix: the file handle was never closed; use a with-statement.
    """
    with open("Violation_data_table.json", "r", encoding="utf-8") as f:
        results = [json.loads(line) for line in f]
    insert_mysql.mysql_insert_violation_data_table_mysql(results)


# 12-hour reading comparison: load JSON lines and bulk-insert into MySQL
def json_insert_reading_comparison_12_hour_table():
    """Read every JSON line from insert_reading_comparison_12_hour_table.json
    and hand the records to insert_mysql for bulk insertion.

    Fix: the file handle was never closed; use a with-statement.
    """
    with open("insert_reading_comparison_12_hour_table.json", "r", encoding="utf-8") as f:
        results = [json.loads(line) for line in f]
    insert_mysql.mysql_insert_reading_comparison_12_hour_table(results)


# 36-hour reading comparison: load JSON lines and bulk-insert into MySQL
def json_insert_reading_comparison_36_hour_table():
    """Read every JSON line from insert_reading_comparison_36_hour_table.json
    and hand the records to insert_mysql for bulk insertion.

    Fix: the file handle was never closed; use a with-statement.
    """
    with open("insert_reading_comparison_36_hour_table.json", "r", encoding="utf-8") as f:
        results = [json.loads(line) for line in f]
    insert_mysql.mysql_insert_reading_comparison_36_hour_table(results)


# Data form table: load JSON lines and bulk-insert into MySQL
def json_insert_Data_form_table():
    """Read every JSON line from Data_form_table.json and hand the records
    to insert_mysql for bulk insertion.

    errors='ignore' is kept from the original: undecodable bytes are
    silently dropped rather than aborting the load.
    Fix: the file handle was never closed; use a with-statement.
    """
    with open("Data_form_table.json", "r", encoding="utf-8", errors='ignore') as f:
        results = [json.loads(line) for line in f]
    insert_mysql.mysql_insert_dataForm_mysql(results)


def get_violation_info(item):
    """Parse a violation-notification message into (cause, title, result).

    :param item: dict with 'Title' and HTML 'Content' fields
    :returns: tuple (violation cause, offending content title, handling
        result); any field that does not apply is returned as ''
    """
    title = item['Title']
    content = re.sub('<.*?>', '', item['Content'])  # strip HTML tags
    cause = offending = outcome = ''
    if re.search('违规处理', title):
        suspected = re.search('涉[嫌及](.+?)，', content)
        if suspected:
            cause = suspected.group(1)
        else:
            cause = re.search('发现此公众号(.+?)，', content).group(1)
        offending = re.search('违规(.*?)消息“(.*?)”', content).group(2)
    elif re.search('功能屏蔽', title):
        suspected = re.search('涉[嫌及](.+?)，', content)
        checked = re.search('您的帐号经查(.+?)，', content)
        if suspected:
            cause = suspected.group(1)
        elif checked:
            cause = checked.group(1)
        blocked_content = re.search('违规内容：(.+?)如有异议', content)
        if blocked_content:
            offending = re.sub('&nbsp;', '', blocked_content.group(1))
        else:
            fake_traffic = re.search('流量主通过不正当方式制造虚假或无效曝光量、点击量', content)
            if fake_traffic:
                offending = fake_traffic.group(0)
        # Group the blocked features by their unblock date.
        features_by_date = {}
        for feature_name, feature_date in re.findall('已屏蔽([^功能]+?)功能至([^,，]+)[，,]', content):
            features_by_date.setdefault(feature_date, []).append(feature_name)
        for feature_date, forbidden_features in features_by_date.items():
            outcome += "{}: {}\n".format(feature_date, ','.join(forbidden_features))
    elif re.search('粉丝删除', title):
        cause = re.search('所禁止的(.+?)，', content).group(1)
        offending = re.sub('&nbsp;', '', re.search('发起的"(.*?)"', content).group(1))
        outcome = re.search('已删除.+?粉丝数。', content).group(0)
    elif re.search('责令处理', title):
        offending = re.search('图文消息“(.*?)”', content).group(1)
        outcome = re.search('已被互联网信息内容主管部门责令删除', content).group(0)
    return cause, offending, outcome


def get_violation_list(data_date, gh_id):
    """Load the per-day violation-notification list for *gh_id*."""
    return utils.get_json_file(
        gh_id, data_date, 'violation-list.json', data_type=list)


def get_user_gender_ratio(genders, total, needed):
    """Return user_count(needed) / total from a gender-attribute payload.

    :param genders: dict with a 'genders' list of {'attr_name', 'user_count'}
    :param total: total user count (divisor)
    :param needed: attribute name to look up (surrounding spaces ignored)
    :returns: the ratio as a float, or '' when unavailable
    """
    if not needed or not total:
        return ''
    needed = needed.strip()
    if not genders:
        return ''
    try:
        for entry in genders['genders']:
            if entry['attr_name'] == needed:
                return int(entry['user_count']) / int(total)
    except (KeyError, ValueError):
        print(genders)  # diagnostic dump of the unexpected payload
        return ''
    return ''


def get_app_msg_topmost_read(i, ma, date):
    """Return the read count of the first image-text message sent on *date*.

    :param i: list of sent-message records (each with 'type', 'sent_info',
        'appmsg_info')
    :param ma: message-stat dict; an empty 'total' list means nothing sent
    :param date: date string matched against each message's send timestamp
    :returns: the read count, or '当天未发' (nothing sent that day) /
        '非图文类型' (no image-text message with a read count found)
    """
    if ma.get('total') == []:
        return '当天未发'
    msg = ''

    if len(i):
        for m in i:
            # type 9 == image-text message; only those carry read statistics
            if m['type'] == 9 and utils.get_date_from_timestamp(m['sent_info']['time']) == date:
                info = m['appmsg_info']
                other = info[0].get('other_info')
                if 'read_num' in other:
                    msg = other['read_num']
                    break
            # NOTE(review): executes on every iteration that does not break,
            # so a matching message lacking 'read_num' is also reported as
            # 非图文类型 -- confirm this overwrite is intended.
            msg = '非图文类型'
    else:
        msg = '当天未发'

    return msg


def get_app_msg_stat(m):
    """Flatten an app-msg stat payload into a 12-element counter list.

    Layout: [all_uv, all_pv, dialog_uv, dialog_pv, moment_uv, moment_pv,
    share_user, share_cnt, fav_user, fav_cnt, hist,
    dialog_pv + moment_pv + share_cnt + hist].
    """
    # user_source codes observed in the data: 99999999 overall,
    # 0 conversation, 2 moments.
    buckets = {'all': (0, 0), 'dialog': (0, 0), 'moment': (0, 0)}
    source_to_bucket = {99999999: 'all', 0: 'dialog', 2: 'moment'}
    fav_user = fav_cnt = 0
    for entry in m.get('item', []):
        fav_user += entry['add_to_fav_user']
        fav_cnt += entry['add_to_fav_count']
        bucket = source_to_bucket.get(entry['user_source'])
        if bucket is not None:
            buckets[bucket] = (entry['int_page_read_user'],
                               entry['int_page_read_count'])
    share_user = sum(s['share_user'] for s in m.get('share', []))
    share_cnt = sum(s['share_count'] for s in m.get('share', []))
    all_uv, all_pv = buckets['all']
    dialog_uv, dialog_pv = buckets['dialog']
    moment_uv, moment_pv = buckets['moment']
    hist = 0  # history-message reads: not present in this payload
    return [all_uv, all_pv, dialog_uv, dialog_pv, moment_uv, moment_pv,
            share_user, share_cnt, fav_user, fav_cnt, hist,
            dialog_pv + moment_pv + share_cnt + hist]


def get_publisher_stat(p):
    """Return [exposure, clicks, income] from an ad-publisher payload.

    The sentinel "EMPTY" yields ['-', '-', '-']; missing keys default to 0.
    """
    if p == "EMPTY":
        return ['-', '-', '-']
    # cost appears to be in 1/100 units (fen -> yuan) -- divided to match.
    income = p.get('cost', 0) / 100.0
    return [p.get('view_count', 0), p.get('click_count', 0), income]


def get_app_msg_detail_stat(messages, date_to_find):
    """Collect the messages sent on *date_to_find* as flat detail rows.

    Each row is [send datetime, title 1, reads 1, title 2, reads 2, ...].
    Service accounts may send several messages per day, so a list of rows
    is returned; [['']] when nothing matches.
    """
    rows = []
    for message in messages:
        # Only image-text messages (type 9) carry read statistics.
        if message['type'] != 9:
            continue
        if utils.get_date_from_timestamp(message['sent_info']['time']) != date_to_find:
            continue
        row = [utils.get_datetime_from_timestamp(message['sent_info']['time'])]
        for message_item in message['appmsg_info']:
            row.append(message_item['title'])
            row.append(message_item['other_info']['read_num'])
        rows.append(row)
    return rows or [['']]


class ReportGenerator:
    """Fetches the per-day JSON data files backing one account's report."""

    def __init__(self, library, date_to_report):
        """
        :param utils.AccountLibrary library:
        :param date_to_report: date string the report is generated for
        """
        self.date = date_to_report
        self.library = library
        self.gh_id = None

    def _counter_date(self):
        # Counters for "today" fall back to yesterday's snapshot --
        # presumably because today's data is not final; TODO confirm.
        if self.date == utils.get_today():
            return utils.get_yesterday()
        return self.date

    def get_basic(self, gh_id):
        """Basic account info (yesterday's file when the date is today)."""
        return utils.get_json_file(gh_id, self._counter_date(), 'basic.json')

    def get_user_counter(self, gh_id):
        """Follower counters (yesterday's file when the date is today)."""
        return utils.get_json_file(gh_id, self._counter_date(), 'user-counter.json')

    def get_ad_info(self, gh_id):
        """Ad placement info -- always read from today's snapshot."""
        return utils.get_json_file(gh_id, utils.get_today(), 'app-ad-info.json')

    def get_user_attr(self, gh_id):
        """User attribute (e.g. gender) distribution for the report date."""
        return utils.get_json_file(gh_id, self.date, 'user-attr.json')

    def get_msg_counter(self, gh_id):
        """Message counters for the report date."""
        return utils.get_json_file(gh_id, self.date, 'app-msg-counter.json')

    def get_msg_all(self, gh_id):
        """All message statistics for the report date."""
        return utils.get_json_file(gh_id, self.date, 'app-msg-all.json')

    def get_publisher_counter(self, gh_id):
        """Ad-publisher statistics for the report date."""
        return utils.get_json_file(gh_id, self.date, 'publisher.json')

    def get_violation_list(self, gh_id):
        """Violation notifications for the report date (a list)."""
        return utils.get_json_file(gh_id, self.date, 'violation-list.json', data_type=list)

    @staticmethod
    def get_msg_info(data_date, gh_id):
        """Sent-message details for *data_date*.

        Fix: marked @staticmethod -- the method was defined without *self*
        and is called as ReportGenerator.get_msg_info(date, gh_id); the
        decorator makes that contract explicit and also safe on instances.
        """
        return utils.get_json_file(gh_id, data_date, 'app-msg-info.json', data_type=list)


def get_all_publisher(gh_id, date_list):
    """Collect the daily ad income for *gh_id* over *date_list*.

    :returns: (incomes, dates) -- parallel lists; a day without data
        contributes the placeholder '-'
    """
    incomes = []
    covered_dates = []
    for day in date_list:
        payload = utils.get_json_file(gh_id, day, 'publisher_by_day.json')
        covered_dates.append(day)
        if payload:
            incomes.append(str(payload.get('income') / 100))
        else:
            incomes.append('-')
    return incomes, covered_dates
