import os
import urllib
import urllib.request
from lxml import etree
from zc_getdata.openurl import openurl
from zc_getdata.toSave import saveList, save_Single_Project

# Absolute project directory on the author's machine — NOTE(review): machine-specific,
# only used to build the feedback-cache path in get_rate_From_bids.
ABPATH = r'C:\Users\zxd\PycharmProjects\spider\zc_getdata'

# Bidder ids to skip when fetching feedback pages — presumably known-bad or
# unparsable accounts; get_rate_From_bids returns 0 for these immediately.
feedbackBLACKIST = {'10087', '107397', '23200', '56074', '62172', '89021'}

# Site root, prepended to the relative bidder urls scraped from project pages.
BASE_URL = 'http://www.taskcity.com/'


# 获取指定页数范围内的所有project_url页面
# Download the project-list pages in [startpage, endpage) and save each one
# as ./data/plisthtml/plist<pageid>.html.
def getPlist(baseurl, startpage=2, endpage=5):
    """Fetch project-list pages and save the raw HTML to ./data/plisthtml/.

    Args:
        baseurl: list-page url without the ``?page=`` query string.
        startpage: first page number to fetch (inclusive).
        endpage: last page number, exclusive (``range`` semantics).
    """
    def getPlist_onePage(pageurl, pageid):
        # Fetch one list page and persist it; returns the HTML text
        # ('' when the request failed).
        pagename = f'plist{pageid}'
        head = {
            "User-Agent": "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.99 Mobile Safari/537.36"
        }
        req = urllib.request.Request(url=pageurl, headers=head)
        html = ""
        try:
            response = urllib.request.urlopen(req, timeout=5)
            html = response.read().decode('utf-8')
            print(f'{pagename}.html')
            plisthtmlPath = f'./data/plisthtml/{pagename}.html'
            # Context manager guarantees the file is closed even if the
            # write raises (the original leaked the handle in that case).
            with open(plisthtmlPath, 'w', encoding='utf-8') as file:
                file.write(html)
        except urllib.error.URLError as e:
            print(f"{pagename} error!")
            if hasattr(e, "code"):
                print(e.code)
            if hasattr(e, "reason"):
                print(e.reason)
        return html

    for i in range(startpage, endpage):
        getPlist_onePage(baseurl + '?page=' + str(i), i)


# 遍历p_list得到project_page并保存
# Walk every saved list page, collect the project urls, then download and
# save each project detail page.
def get_Project_Html():
    """Download all project detail pages referenced by the saved list pages.

    Resumes from (number of pages already downloaded - 2) so an interrupted
    run re-fetches the last, possibly incomplete, page.
    """
    def get_P_URl_list(Purl_list_name='project_url'):
        # Collect project detail-page urls from every saved list page.
        def get_imlist(path):
            # Filenames of the saved list pages (os.path.join with a single
            # segment in the original was a no-op — returns bare names).
            return [f for f in os.listdir(path) if f.endswith('.html')]

        def parse_projectList(htmlname):
            # Extract the project links (<h4><a href=...>) from one page.
            parser = etree.HTMLParser(encoding='utf-8')
            tree = etree.parse('./data/plisthtml/' + htmlname, parser=parser)
            return tree.xpath('//h4/a/@href')

        P_URl_list = []
        # extend() instead of repeated list concatenation (was quadratic).
        for p in get_imlist('data/plisthtml/'):
            P_URl_list.extend(parse_projectList(p))

        # saveList(P_URl_list, Purl_list_name)
        return P_URl_list

    def get_phtml_Num():
        # Number of project pages already downloaded (resume point).
        return len(os.listdir('./data/projectHtml'))

    P_URl_list = get_P_URl_list()
    if not P_URl_list:
        # Guard: the original crashed with IndexError on an empty url list.
        print('no project urls found')
        return
    # Resume: back up two entries to be safe about a partial download.
    start = max(get_phtml_Num() - 2, 0)
    print(f'start from {start}:{P_URl_list[start]}')
    for purl in P_URl_list[start:]:
        _, tail = os.path.split(purl)
        openurl(purl, './data/projectHtml', saveName=f'p_{tail}.html', timeout=5)


# 从单个project_page 解析信息
# Parse one saved project detail page into a flat dict of fields.
def getproject_info(htmlname) -> dict:
    """Parse a saved project page and return its fields.

    Args:
        htmlname: path to a saved project HTML file.

    Returns:
        dict with keys: pid, state, cost, timecost, owner, contractor_name,
        contractor_url, pname, date, description, bids (a set of
        (name, absolute_url) tuples).

    Raises:
        IndexError: when the page does not contain the expected markup
        (each field xpath must match at least once).
    """
    print(f'parse pinfo:{htmlname}')
    parser = etree.HTMLParser(encoding='utf-8')
    tree = etree.parse(htmlname, parser=parser)
    p = {}
    p['pid'] = tree.xpath('//div[@class="panel-body"]/div[@class="list"]/div[1]/span/text()')[0]
    p['state'] = tree.xpath('//div[@class="panel-body"]/div[@class="list"]/div[2]/span/text()')[0]
    p['cost'] = tree.xpath('//div[@class="panel-body"]/div[@class="list"]/div[3]/span/text()')[0]
    p['timecost'] = tree.xpath('//div[@class="panel-body"]/div[@class="list"]/div[4]/span/text()')[0]
    # Project owner — not scraped from this page; left empty by design.
    p['owner'] = ''
    # Winning contractor (the bid row marked with span#win_bid).
    # NOTE(review): when there is no winner both fields stay as empty
    # lists, not strings — preserved from the original behavior.
    contractor_name = tree.xpath('//table[@class ="bid_table "]//td/span[@id="win_bid"]/../a[2]/text()')
    contractor_url = tree.xpath('//table[@class ="bid_table "]//td/span[@id="win_bid"]/../a[2]//@href')
    if len(contractor_name) != 0:
        contractor_name = contractor_name[0]
        contractor_url = contractor_url[0]
    p['contractor_name'] = contractor_name
    p['contractor_url'] = contractor_url
    # Project name.
    p['pname'] = tree.xpath('//a[@class="tag-black"][2]/text()')[0]
    # Publication date.
    p['date'] = tree.xpath('//div[@class="panel-body"]/div[@class="list"]/div[5]/span/text()')[0]
    # Project description — not scraped from this page; left empty by design.
    p['description'] = ''
    # All bidders: pair names with absolute urls; set() removes duplicates.
    bidders_name = tree.xpath('//table[@class ="bid_table "]//td/a[2]//text()')
    bidders_url = tree.xpath('//table[@class ="bid_table "]//td/a[2]/@href')
    p['bids'] = {(name, BASE_URL + url) for name, url in zip(bidders_name, bidders_url)}
    return p


#
# Parse every downloaded project page, saving each record as it is parsed.
def get_AllProjectInfo():
    """Parse all downloaded project pages and save each record.

    Resumes from (number of records already saved - 2) so an interrupted
    run re-parses the last, possibly incomplete, record.

    Returns:
        list of parsed project dicts (see getproject_info).
    """
    def get_pInfo_Num():
        # Number of project-info records already saved (resume point).
        return len(os.listdir('./data/pInfo'))

    htmlpath = './data/projectHtml/'

    def get_imlist(path):
        # Full paths of all downloaded project pages.
        return [os.path.join(htmlpath, f) for f in os.listdir(path) if f.endswith('.html')]

    project_page_list = get_imlist(htmlpath)
    l = []
    if not project_page_list:
        # Guard: the original crashed (or printed the wrong page) on an
        # empty directory.
        print('no project pages found')
        return l
    # Resume: back up two entries to be safe about a partial record.
    start = max(get_pInfo_Num() - 2, 0)
    # BUG FIX: the original printed project_page_list[start - 1], which is
    # the LAST element when start == 0 (negative index).
    print(f'start from {start} ,{project_page_list[start]}!')
    for pro_page in project_page_list[start:]:
        # BUG FIX: the original called getproject_info twice per page,
        # parsing every file two times; parse once and reuse the result.
        p = getproject_info(pro_page)
        l.append(p)
        save_Single_Project(p)
    return l


# Look up bidder `bid`'s rating for project `pid` on their feedback page.
def get_rate_From_bids(bid, pid):
    """Return the rating bidder *bid* received for project *pid*.

    Uses the locally cached feedback page when present, otherwise downloads
    it (caching it via openurl).

    Returns:
        3 for 好评 (good), 2 for 中评 (neutral), 1 for 差评 (bad);
        0 when the bidder is blacklisted, the page cannot be
        fetched/parsed, or no rating row for *pid* is found.
    """
    if bid in feedbackBLACKIST:
        return 0
    FNAME = f'{bid}_feedback.html'
    FPATH = rf'{ABPATH}\data\bfeedbackHtml'
    PATH_SAVE_NAME = FPATH + '/' + FNAME
    burl = f'https://www.taskcity.com/users/{bid}/feedback'
    if os.path.isfile(PATH_SAVE_NAME):
        # Cached copy on disk — parse it directly.
        parser = etree.HTMLParser(encoding='utf-8')
        bidfb_page = etree.parse(PATH_SAVE_NAME, parser=parser)
    else:
        page = openurl(burl, FPATH, FNAME)
        try:
            bidfb_page = etree.HTML(page)
        except Exception:
            # Download came back empty/invalid — treat as "no rating"
            # (was a bare except; kept broad deliberately, best-effort).
            return 0
    # Rating titles used by the site; index + 1 maps 差评->1, 中评->2, 好评->3.
    r_list = ['差评', '中评', '好评']
    # NOTE(review): pid is interpolated unquoted into the XPath — fine for
    # numeric ids, would break for ids containing quotes/spaces.
    rate = bidfb_page.xpath(f'//tr//a[contains(@href,{pid})]/ancestor::tr/td[1]/img/@title')
    if rate:
        return r_list.index(rate[0]) + 1
    return 0


if __name__ == '__main__':
    # Pipeline step 2: parse the already-downloaded project pages.
    # (Uncomment get_Project_Html() to run step 1, the download, first.)
    # get_Project_Html()
    get_AllProjectInfo()
    # bid = '34235'
    # pid = 223513
    # res = get_rate_From_bids(bid, pid)
    # print(res)
