import os

from lxml import etree

from zc_getdata.openurl import openurl

# Local working directory of the scraper (Windows path; the doubled forward
# slashes are tolerated by the Windows path APIs).
ABPATH = r'C://Users//HP//PycharmProjects//spider//zbj//'
# All downloaded HTML is cached under this data directory.
ZBJ_DATAPATH = ABPATH + 'zbjdata/'
# Mobile project-detail base URL; a task id is appended to it.
ZBJ_TASK_BASEURL = r'https://m.zbj.com/task/'
# Bidder shop-page base URL; a user id is appended to it.
ZBJ_BID_BASEURL = r'https://shop.zbj.com/'
# Filename prefixes for cached bidder-shop / project pages.
BID_PREFIX = "zbj_user_"
PRO_PREFIX = "zbj_"

# Retry queues filled by the download_* functions below.
# NOTE(review): "faied" is a long-standing typo for "failed"; the names are
# kept because other code in this module references them.
faied_project_list = []
faied_bid_list = []
bid_blacklist = []


def get_bidname(uid):
    """Return the cache filename for bidder *uid*'s shop page."""
    return BID_PREFIX + str(uid) + '.html'


def get_bidfile(uid):
    """Return the full local path of the cached shop page for *uid*."""
    return f'{ZBJ_DATAPATH}/bidhtml/{get_bidname(uid)}'


def get_proname(pid):
    """Return the cache filename for project *pid*'s detail page."""
    return PRO_PREFIX + str(pid) + '.html'


def get_profile(pid):
    """Return the full local path of the cached project page for *pid*."""
    return f'{ZBJ_DATAPATH}/prohtml/{get_proname(pid)}'


def ls_project_list():
    """Return the full paths of every cached project HTML file."""
    propath = ZBJ_DATAPATH + '/prohtml'
    paths = []
    for fname in os.listdir(propath):
        if fname.endswith('.html'):
            paths.append(os.path.join(propath, fname))
    return paths


def get_zbj_project_html(homeurl, pagerange=84):
    """Download the paginated task-hall listing and collect project ids.

    homeurl:   listing home URL (only consumed by the disabled page-count
               probe below).
    pagerange: number of listing pages to fetch; defaults to the
               previously hard-coded 84 so existing callers are unchanged.
    Returns a flat list of task-id strings scraped from every page.
    """
    def get_pagenum(url):
        # Probe the first listing page for the "last page" pagination link.
        html = openurl(url + "/?t=1&s=2")
        tree = etree.HTML(html)
        pagenum = tree.xpath('//*[@id="utopia_widget_8"]/a[last()-1]/text()')[0]
        return int(pagenum)

    def get_proURL(html):
        # Each result card carries the task id in a data-taskid attribute.
        tree = etree.HTML(html)
        return tree.xpath(
            '//div[@class ="list-result search-result-list"]/div[@class="result-search-item"]/@data-taskid')

    # NOTE(review): the page count is pinned instead of calling
    # get_pagenum(homeurl) — presumably the probe was unreliable; confirm
    # before re-enabling it.
    res_list = []
    for i in range(1, pagerange + 1):
        # BASEURL is only bound inside the __main__ guard — this function
        # relies on it being set before the call (pre-existing coupling).
        # Fix: save under ZBJ_DATAPATH instead of a hard-coded path that
        # pointed at a different user's home directory (C:\Users\zxd\...).
        html = openurl(f'{BASEURL}page{i}.html?t=1&s=2',
                       ZBJ_DATAPATH,
                       'zbjhtml.html')
        res_list.append(get_proURL(html))
    # Flatten without the quadratic sum(res_list, []) idiom.
    return [pid for page in res_list for pid in page]


def del_zbj_user_html(uid):
    """Delete the cached shop page for bidder *uid*."""
    target = get_bidfile(uid)
    os.remove(target)


def del_zbj_project_html(id):
    """Delete the cached project page for project *id*."""
    os.remove(f'{ZBJ_DATAPATH}prohtml/zbj_{id}.html')


def del_zbjhtml(id):
    """Delete the cached project page for *id*.

    NOTE(review): byte-for-byte duplicate of del_zbj_project_html; kept
    because parse_proInfo_old calls it by this name.
    """
    target = ZBJ_DATAPATH + 'prohtml/' + f'zbj_{id}.html'
    os.remove(target)


def parse_proInfo_old(tree):
    """Parse one cached project page (an lxml HTML tree) into a dict.

    Returns, depending on page state:
    - {'id': ...} only, after deleting the cached file, when no bids exist;
    - {'describe': ['failed project']} when there is no winning-shop link;
    - otherwise a full record: id, title, tags, cost, describe, rate,
      bids, win_url, win_id.
    """
    project = {}
    # Breadcrumb text embeds the numeric id in square brackets: '...[12345]...'.
    idstr = tree.xpath('//a[@class="crumbs-item color"]/text()')[0]
    id = idstr[idstr.find('[') + 1: idstr.find(']')]
    print(f'parse:{id}')
    # NOTE(review): title stays a raw xpath result list while tags/bids are
    # joined into strings below — confirm downstream expects a list here.
    title = tree.xpath('//*[@id="utopia_widget_3"]/div[1]/p/text()')
    tags = tree.xpath('//*[@id="utopia_widget_3"]/div[1]/div/span/text()')
    # Price text carries a leading '¥' currency sign.
    cost = float(tree.xpath('//*[@id="utopia_widget_3"]/div[1]/span/text()')[0].strip('¥'))
    describe = ''.join(tree.xpath('//*[@id="utopia_widget_3"]/div[2]/p/text()')) \
        .replace('\t', '').replace('\r', '').strip()
    rate = 0
    bids = tree.xpath(' //a[@class="J-invite invite-btn"]/@data-userid')
    if len(bids) == 0:
        # A page without bidders is useless: drop the cached HTML so a
        # later crawl can re-fetch it.
        print(f'{id},has no bids!')
        del_zbjhtml(id)
        project['id'] = id
        return project
    win_url = tree.xpath('//a[@data-linkid="third-shot-shop"]/@href')
    if len(win_url) == 0:
        # No winner link: the project never completed.
        project['describe'] = ['failed project']
        return project
    win_url = win_url[0]
    # The shop URL ends with the winning bidder's user id.
    win_id = (win_url).split('/')[-1]
    project['id'] = id
    project['title'] = title
    project['tags'] = ','.join(tags)
    project['cost'] = cost
    project['describe'] = describe
    # rating (never computed here; always 0)
    project['rate'] = rate
    project['bids'] = ','.join(bids)
    project['win_url'] = win_url
    project['win_id'] = win_id
    return project


def download_projects():
    """Fetch the task-hall listing, then download every project page.

    Pages that report a full refund or a permission error have their cache
    deleted; pages blocked by the slider captcha are queued in the module
    global faied_project_list for a later retry pass.
    """
    # Single undifferentiated listing (per-category URLs were retired).
    homelist = {'https://task.zbj.com/hall/list/?t=1&s=2'}
    for url in homelist:
        pro_idlist = get_zbj_project_html(url)
        for pid in pro_idlist:
            purl = ZBJ_TASK_BASEURL + pid
            html = openurl(purl, ZBJ_DATAPATH + 'prohtml', f'zbj_{pid}.html')
            # Convert once; the original re-ran str(html) for every check
            # and used the __contains__ dunder instead of the `in` operator.
            text = str(html)
            # Refunded / permission-denied pages are useless: drop the cache.
            if '该订单已全额退款' in text or '没有权限' in text:
                del_zbj_project_html(pid)
            # Slider captcha: remember the URL for a retry pass.
            if '拖动滑块继续访问' in text:
                faied_project_list.append(purl)
    print(faied_project_list)


def download_badproject(faiedlist: list):
    """Re-download project pages that previously failed, until none fail.

    faiedlist: project URLs whose last fetch hit a captcha or permission
    wall. Repeats whole passes until one completes with no new failures;
    may loop forever if the site keeps blocking (pre-existing behaviour).

    Fixes: the annotation was the list literal `[]` (meaningless as a
    type), the loop condition used `len(...) != 0`, and the substring
    checks called the __contains__ dunder directly.
    """
    newfaiedlist = []
    while faiedlist:
        print(f"download_badproject: {len(faiedlist)}个")
        for url in faiedlist:
            pid = url.split('/')[-1]
            url = f'{ZBJ_TASK_BASEURL}{pid}'
            html = openurl(url, ABPATH + '//zbjdata//prohtml', f'zbj_{pid}.html')
            text = str(html)
            # Any block/permission marker means the fetch must be retried.
            if ('拖动滑块继续访问' in text
                    or '没有权限' in text
                    or '权限查询异常，请您稍后重试' in text):
                newfaiedlist.append(url)

        faiedlist = newfaiedlist
        newfaiedlist = []
        print("faied_project_list:")
        print(faiedlist)

def download_bids(bid_url_list: list):
    """Download every bidder's shop page; return the retry queue.

    bid_url_list: shop-page URLs whose last path segment is the user id.
    Returns the module-global faied_bid_list, extended with entries that
    must be retried.

    Fixes: list-literal annotation, __contains__ dunder calls, and the
    page-state branches were independent `if`s — a page matching two
    markers would call os.remove twice and raise FileNotFoundError; they
    are now a mutually exclusive elif chain.
    """
    for iurl in bid_url_list:
        uid = iurl.split('/')[-1]
        html = openurl(iurl, savePath=os.path.join(ZBJ_DATAPATH, 'bidhtml/'), saveName=get_bidname(uid))
        text = str(html)
        if '该订单已全额退款' in text or '没有权限' in text:
            # Refunded / permission-denied: drop the cached page.
            del_zbj_user_html(uid)
        elif '拖动滑块继续访问' in text:
            # Slider captcha: drop the cache and queue for retry.
            faied_bid_list.append(iurl)
            del_zbj_user_html(uid)
        elif '该用户还未开店' in text:
            print(f'{uid} 未开店')
            del_zbj_user_html(uid)
        if text == "":
            # NOTE(review): this appends the bare uid while the captcha
            # branch appends the full URL — the retry list mixes both
            # shapes; confirm which one the consumer expects.
            faied_bid_list.append(uid)
    return faied_bid_list


def download_bad_bids(faied_user_list: list):
    """Retry shop-page downloads for a list of bidder uids until none fail.

    faied_user_list: user ids (not URLs) whose shop pages failed before.
    Repeats whole passes until one completes with no new failures.

    Fixes: list-literal annotation, `len(...) != 0` loop condition,
    __contains__ dunder calls, and the two delete branches were
    independent `if`s that could double-delete the same file and raise
    FileNotFoundError; they are now mutually exclusive.
    """
    newfaiedlist = []
    while faied_user_list:
        print(f"download_bad_bids: {len(faied_user_list)}个")
        for uid in faied_user_list:
            # Already cached by an earlier pass — nothing to do.
            if os.path.isfile(get_bidfile(uid)):
                continue
            iurl = f'{ZBJ_BID_BASEURL}{uid}'
            html = openurl(iurl, savePath=os.path.join(ZBJ_DATAPATH, 'bidhtml/'), saveName=get_bidname(uid))
            text = str(html)
            if '该用户还未开店' in text or '没有权限' in text:
                del_zbj_user_html(uid)
            elif '拖动滑块继续访问' in text:
                print(f'{uid}:拖动滑块继 remove ')
                newfaiedlist.append(uid)
                del_zbj_user_html(uid)
            if text == "":
                print(f'{uid}:str(html) == "" remove ')
                newfaiedlist.append(uid)
        faied_user_list = newfaiedlist
        newfaiedlist = []
        print("faied_user_list:")
        print(faied_user_list)


if __name__ == '__main__':
    # Desktop listing base; get_zbj_project_html reads this module global.
    BASEURL = r'https://task.zbj.com/'
    # BASEURL = r'https://m.zbj.com/task/'
    faiedlist = []
    bid_blacklist = []

    # Earlier crawl stages, run as needed:
    # download_projects()
    # print(faied_project_list)
    # download_bad_bids(['11290465'])

    # Fix: download_bids() was originally called with no argument, which
    # raised TypeError because bid_url_list is required. Pass the list of
    # shop URLs to fetch (empty until populated by an earlier stage).
    download_bids([])
