import json
import os

import pandas as pd
from lxml import etree

from zbj.zbjdata.badbidlist import badbidlist
from zc_getdata.openurl import openurl
from zbj.getPro import ZBJ_DATAPATH, ls_project_list, get_bidfile, get_bidname, get_profile, ZBJ_BID_BASEURL, \
    download_bids, download_badproject, download_bad_bids
from zbj import utils


def zbj_none_user(uid):
    """Return a placeholder bidder record for *uid* when no saved page exists.

    All metrics are zeroed and enterprise certification is False so the
    record can be merged with normally parsed bids.
    """
    return {
        'uid': uid,
        'Cumulative_turnover': 0,
        'halfyear_turnover': 0,
        'favorable_rate': 0,
        'service_quality_rate': 0,
        'membership_rank': 0,
        'Employers_recommend': 0,
        'enterprise_certification': False,
    }


def save_bids_page(uid):
    """Download the shop page of bidder *uid* and save it under ZBJ_DATAPATH/bidhtml/.

    Pure side-effect function: fetches https://shop.zbj.com/<uid> via openurl,
    which persists the HTML to disk. Returns None.
    """
    burl = f'https://shop.zbj.com/{uid}'
    savepath = ZBJ_DATAPATH + f'bidhtml/{uid}.html'
    # openurl both fetches and saves; the returned HTML was previously bound
    # to an unused local, which has been removed.
    openurl(burl, savepath, saveName=f'zbj_{uid}.html')


def parse_bidsInfo(uid):
    """Parse a bidder's profile metrics from the locally saved shop page.

    Reads the HTML file previously downloaded for *uid* (no network access)
    and returns a dict with keys: uid, Cumulative_turnover, halfyear_turnover,
    favorable_rate, service_quality_rate, membership_rank, Employers_recommend,
    enterprise_certification. If no saved page exists, returns the zeroed
    placeholder from zbj_none_user(uid).
    """
    # Parse bid info from the saved file only.
    bid_url = f'https://shop.zbj.com/{uid}'  # NOTE(review): currently unused
    bid = {}
    print(f"parse uid {uid}")
    fname = get_bidname(uid)
    PATH_SAVE_NAME = get_bidfile(uid)
    bid_page = ''
    if os.path.isfile(PATH_SAVE_NAME):
        parser = etree.HTMLParser(encoding='utf-8')
        bid_page = etree.parse(PATH_SAVE_NAME, parser=parser)
    else:
        # No cached page for this uid -> all-zero placeholder record.
        return zbj_none_user(uid)
    # First decide which page template this is (two shop-page layouts exist).
    ist5s = bid_page.xpath('//body[contains(@class,"t5s")]')
    if len(ist5s) != 0:
        # "t5s" layout: metrics live in dedicated class-named elements.
        Cumulative_turnover = bid_page.xpath('//div[@class ="personal-shop-balance"][1]/span[2]/text()')
        halfyear_turnover = bid_page.xpath('//div[@class ="personal-shop-balance"][2]/span[1]/text()')
        favorable_rate = bid_page.xpath('//span[@class ="good-rate-equal"]/text()')
        enterprise_certification = bid_page.xpath('//span[@class="tag-icons icons-identity-business"]/i/text()')
        service_quality_rate = bid_page.xpath('//div[@class="shop-evaluate-det"]/span/text()')
        # Membership rank ("Zhu-N-Jie" level badge).
        membership_rank = bid_page.xpath('//span[contains(@class,"tag-icons icons-ability-")]/i/text()')
        Employers_recommend = \
            bid_page.xpath('//label[@class ="icon-wrap recommend-yes sj-zbj-shop-ico-comments"]/span/text()')
    else:
        # Non-t5s layout, e.g. https://shop.zbj.com/1264613/ — locate values
        # by visible Chinese label text instead of CSS classes.
        Cumulative_turnover = bid_page.xpath('//div[contains(text(),"累计成交：")]/strong/text()')
        halfyear_turnover = bid_page.xpath('//div[contains(text(),"近半年成交")]//preceding-sibling::div/text()')
        favorable_rate = bid_page.xpath('//div[contains(text(),"好评率")]//preceding-sibling::div/text()')
        enterprise_certification = bid_page.xpath(
            '//div[@id="base-info-card"]//i[@class="tag-text"][contains(text(),"企业")]')

        service_quality_rate = bid_page.xpath('//div[contains(text(),"服务质量评分")]//preceding-sibling::div/text()')
        # Membership rank ("Zhu-N-Jie" level badge).
        membership_rank = \
            bid_page.xpath('//div[@id="base-info-card"]//i[@class="tag-text"][contains(text(),"猪")]/text()')

        Employers_recommend = \
            bid_page.xpath('//div[contains(text(),"雇主推荐")]/text()')

    # Normalize each xpath hit-list to a scalar; empty list -> neutral default.
    if len(Cumulative_turnover) == 0:
        Cumulative_turnover = 0
    else:
        Cumulative_turnover = Cumulative_turnover[0]
        # Strip thousands separators before converting, e.g. "1,234.5".
        Cumulative_turnover = float(Cumulative_turnover.strip().replace(',', ''))

    if len(halfyear_turnover) == 0:
        halfyear_turnover = 0
    else:
        halfyear_turnover = halfyear_turnover[0]
        halfyear_turnover = float(halfyear_turnover.strip().replace(',', ''))

    # Presence of the badge element (any hit) means certified.
    if len(enterprise_certification) != 0:
        enterprise_certification = True
    else:
        enterprise_certification = False

    if len(favorable_rate) == 0:
        favorable_rate = 0
    else:
        favorable_rate = favorable_rate[0]
        # "98%" -> 0.98, rounded to 3 decimals.
        favorable_rate = round(float(favorable_rate.strip().strip('%')) / 100, 3)

    if len(membership_rank) == 0:
        membership_rank = ''
    else:
        membership_rank = membership_rank[0]
        # Badge text looks like "猪<chinese numeral>戒"; strip the surrounding
        # characters to isolate the numeral.
        membership_rank = str(membership_rank).strip('猪').strip('戒')
        # Convert the Chinese numeral to an integer.
        membership_rank = utils.chinese2digits(membership_rank)

    if len(service_quality_rate) == 0:
        service_quality_rate = 0
    else:
        service_quality_rate = float(service_quality_rate[0].strip())

    if len(Employers_recommend) == 0:
        Employers_recommend = 0
    else:
        Employers_recommend = Employers_recommend[0]
        # Normalize full-width parentheses, then extract the count inside them.
        Employers_recommend = Employers_recommend.replace('（', '(').replace('）', ')')
        Employers_recommend = Employers_recommend[Employers_recommend.find('(') + 1:Employers_recommend.find(')')]
    bid['uid'] = uid
    bid['Cumulative_turnover'] = Cumulative_turnover
    bid['halfyear_turnover'] = halfyear_turnover
    bid['favorable_rate'] = favorable_rate
    bid['service_quality_rate'] = service_quality_rate
    bid['membership_rank'] = membership_rank
    bid['Employers_recommend'] = Employers_recommend
    bid['enterprise_certification'] = enterprise_certification
    return bid


# Collect bidder uids from every downloaded project page.
def colection_uids():
    """Scan all saved project pages and return the set of bidder uids found.

    Also counts (and prints) the total number of project->bidder relations.
    """
    html_parser = etree.HTMLParser(encoding='utf-8')
    uid_set = set()
    print('colection_uids ...')
    relationnum = 0
    for project_path in ls_project_list():
        doc = etree.parse(project_path, parser=html_parser)
        info = parse_proInfo(doc)
        bids_field = info['bids']
        if bids_field != '':
            ids = bids_field.split(',')
            relationnum += len(ids)
            uid_set.update(ids)
    print(f'relationnum :{relationnum}')
    return uid_set


def parse_from_file(fpath):
    """Parse one saved project page and export it to an Excel workbook.

    Writes the project record to sheet 'project_sheet' and, when the project
    has bids, each bidder's parsed record to sheet 'bids_sheet' in
    ZBJ_DATAPATH/proinfo/zbj_<project id>.xls.

    Returns the parsed project dict.
    """
    print(f'parse project {fpath}')
    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(fpath, parser=parser)
    project = parse_proInfo(tree)
    prodata = pd.DataFrame(project, index=[0])
    # Context manager flushes and closes the writer even if parsing a bid
    # raises; ExcelWriter.save() was deprecated and removed in pandas 2.0.
    with pd.ExcelWriter(ZBJ_DATAPATH + f'proinfo/zbj_{project["id"]}.xls') as excel_writer:
        prodata.to_excel(excel_writer=excel_writer, sheet_name='project_sheet')
        if project['bids'] != '':
            # Parse every bidder referenced by this project and store them
            # alongside the project in the same workbook.
            bids_list = [parse_bidsInfo(uid) for uid in project['bids'].split(',')]
            biddata = pd.DataFrame(bids_list)
            biddata.to_excel(excel_writer=excel_writer, sheet_name='bids_sheet')
    return project


def parse_proInfo(tree):
    """Extract a project record from a parsed project-page element tree.

    Returns a dict with keys: id, ctime, title, cost, describe, rate,
    bids (comma-joined uid string, winner first) and win_id.
    """
    project = {}
    idstr = tree.xpath('//section[@class="order-info component card"]/div[@class="item"]//span/text()')[0]
    pid = idstr.strip('#')
    print(f'parse project:{pid}')
    # Creation time is embedded in prose like "…于 <date> 发布该需求…".
    ctime = tree.xpath('//p[contains(text(),"发布该需求")]/text()')
    if len(ctime) == 0:
        ctime = ""
    else:
        ctime = ctime[0]
        ctime = ctime[ctime.find('于') + 1:ctime.find('发布该需求')].strip()
    # Slice the {...} JSON payload out of the __INITIAL_STATE__ script tag.
    # (The previous false/true/null locals were dead code left over from an
    # eval-based approach; json.loads handles JSON literals natively.)
    s = tree.xpath('//script[contains(text(),"__INITIAL_STATE__")]/text()')[0]
    s = s[s.find('{'):s.rfind('}') + 1]
    dd = json.loads(s)
    successUser = dd['orderInfo']['successUser']
    # Winner first, then every bidder listed in the works view.
    bids = [str(successUser)]
    for entry in dd['worksViewInfo']['dataList']:
        bids.append(str(entry['source']['userId']))
    title = tree.xpath('//section[@class="order-info component card"]/div[2]//p/text()')[0]
    # tags = tree.xpath('//*[@id="utopia_widget_3"]/div[1]/div/span/text()')
    cost = float(tree.xpath('//section[@class="order-info component card"]/div[3]//p/text()')[0].strip('元'))
    describe = ''.join(tree.xpath('//section[@class="order-info component card"]/div[5]//p/text()')) \
        .replace('\t', '').replace('\r', '').strip()
    rate = tree.xpath('//section[@class="order-appraisal component card"]//span[@class="score"]/text()')
    if len(rate) == 0:
        rate = 0
    else:
        rate = float(rate[0])
    project['id'] = pid
    project['ctime'] = str(ctime)
    project['title'] = str(title)
    project['cost'] = cost
    project['describe'] = describe
    project['rate'] = rate
    project['bids'] = ','.join(bids)
    project['win_id'] = successUser
    return project


def parse_ALL_Project():
    """Parse every downloaded project page (bid pages must already be saved)."""
    proinfo_list = [parse_from_file(p_path) for p_path in ls_project_list()]
    #
    # pdata = pd.DataFrame(proinfo_list)
    # pdata.to_excel(os.path.join(ZBJ_DATAPATH, 'zbj_allpinfo.xls'))


if __name__ == '__main__':
    # Collect bidder uids from all downloaded projects (~1588 users).
    uidlist = colection_uids()
    # Download bid pages: loop over the user list.
    # bid_url_list = [f'{ZBJ_BID_BASEURL}{i}' for i in uidlist]
    #
    # badbidl = badbidlist
    # faiedlist = download_bids(badbidl)
    # # Retry the failed downloads.
    # download_bad_bids(faiedlist)

    # Parse everything — requires bid pages to be downloaded already.
    # parse_ALL_Project()


    # Download bid pages for a fixed uid list.
    # uidlist_none = ['31737512', '31884599', '29461592', '29069703', '23896940', '24415454', '31341849', '29183719',
    #            '31052738', '31545129', '30849969', '18617177', '29338854', '31269143', '21741065', '12163543',
    #            '30948620', '23251724', '31126440', '31375229', '19712688', '29061682', '17616094', '18714546',
    #            '31664898', '25017698', '22008564', '30958895', '12781020', '30850189', '22606482', '30893037',
    #            '17839232', '14035038']
    # download_bad_bids(uidlist_none)

    # Parse a specific file.
    # parse_from_file(get_profile(pid='26787574'))
