import requests,json,time,uuid,re
from urllib import parse
from lxml import etree
from constant import ArticleCounter
from apscheduler.schedulers.background import BackgroundScheduler
# Fetch the currently trending search keywords.
def getHotWords():
    """Scrape http://www.5ce.com/hot/360 and return the hot keywords.

    Returns:
        list[str]: URL-encoded keyword strings, taken from the trailing
        segment of each ``/sc/<keyword>`` link on the hot-words page.

    Raises:
        requests.HTTPError: if the page request fails.
    """
    page_url = 'http://www.5ce.com/hot/360'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        # fixed: the real HTTP header name is 'Referer', not 'Refer'
        'Referer': 'http://www.5ce.com/',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8'
    }
    # timeout keeps the scraper from hanging forever on an unresponsive server
    response = requests.get(page_url, headers=headers, timeout=10)
    response.raise_for_status()
    html = etree.HTML(response.text)
    urls = html.xpath('//div[@class="keyword-button"]/a[last()]/@href')
    prefix = '/sc/'
    # each href looks like '/sc/<keyword>'; keep only the keyword part
    return [url[len(prefix):] for url in urls]

# Search the site with a hot keyword; the result is that keyword's aggregation page data.
def searchHotWord(hot_word):
    """POST a search query for *hot_word* and return the API's 'data' payload.

    Args:
        hot_word: URL-encoded keyword string (as produced by getHotWords).

    Returns:
        The ``data`` field of the JSON response (a list of article records).

    Raises:
        requests.HTTPError: if the API request fails.
    """
    # URL-encode the keyword to build the Referer header
    word_encode = parse.quote_plus(hot_word)
    refer = 'http://www.5ce.com/sc/' + word_encode
    searchUrl = 'http://www.5ce.com/api/sucai/list?dataType=dataList'
    searchHeaders = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36',
        'Host': 'www.5ce.com',
        'Origin': 'http://www.5ce.com',
        # fixed: the real HTTP header name is 'Referer', not 'Refer'
        'Referer': refer,
        'X-Requested-With': 'XMLHttpRequest'
    }
    payLoad = {
        'resourcePlatformGuid': '',
        'keywordsList': hot_word,
        'catalogGuid': '',
        'authorGuid': '',
        'articleContentLikeString': '',
        'articleContentNotlikeString': '',
        'dateRange': ['', ''],
        '0': '',
        '1': '',
        'isKOL': '-999',
        'isOriginal': '-999',
        'issueTimeEnd': '',
        'issueTimeStart': '',
        'orderByColumn': 'issueTime',
        'pageIndex': '1',
        'readCountMax': '-999',
        'readCountMin': '-999',
        'titleLikeString': ''
    }
    response = requests.post(searchUrl, json=payLoad, headers=searchHeaders, timeout=10)
    response.raise_for_status()
    # requests decodes JSON directly; no need for json.loads(response.text)
    data = response.json()['data']
    print(data)
    return data

# Fetch each article page referenced by the search results and rewrite its image URLs.
def getHotContent(datas, hot_word):
    """Download every article in *datas*, extract its embedded JSON body,
    rewrite each image URL to point at img.66826.com, and print the result.

    Args:
        datas: list of search-result records; each must have a 'guid' key.
        hot_word: URL-encoded keyword used to build the article URLs.

    Side effects:
        Increments ArticleCounter.counter once per extracted article,
        sleeps 5 s between articles, and prints each rewritten article.
    """
    # Referer and headers are the same for every article — build them once.
    word_encode = parse.quote_plus(hot_word)
    refer = 'http://www.5ce.com/sc/' + word_encode
    headers = {
        'Host': 'www.5ce.com', 'Pragma': 'no-cache', 'Referer': refer,
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'
    }
    # Raw string with '.' and '$' escaped: they are regex metacharacters and
    # were previously matching any character instead of the literal ones.
    # The article JSON sits between 'window.__APP_STATE.$data = ' and '</script>'.
    article_pattern = re.compile(
        r'(?<=window\.__APP_STATE\.\$data = ).*?(?=</script>)', re.S)
    for data in datas:
        url = 'http://www.5ce.com/view/sc/' + data['guid'] + '/' + hot_word
        response = requests.get(url, headers=headers, timeout=10)
        # Extract the article payload from the page's inline script tag.
        result = article_pattern.search(response.text)
        if not result:
            continue
        # Strip the JSON escaping backslashes so the HTML inside is parseable.
        article = result.group(0).replace('\\', '')
        # Collect all lazily-loaded image URLs from the article body.
        html = etree.HTML(article)
        imglist = html.xpath('//img/@data-src')
        # Today's date, used as the storage path segment.
        format_time = time.strftime('%Y/%m/%d', time.localtime())
        # Per-day article counter, zero-padded to two digits.
        ArticleCounter.counter += 1
        count = str(ArticleCounter.counter).zfill(2)
        # Rewrite every image path to the mirror host.
        for src in imglist:
            src = src.replace('\\', '')
            original_src = src
            if '&url=' in src:
                # the src is a proxy URL; keep only the real image URL
                # after the last '&url=' parameter
                original_src = src[src.rindex('&url=') + len('&url='):]
            replace_src = ('https://img.66826.com/Pics/' + format_time +
                           '/a_' + count + '/' + str(uuid.uuid1()) +
                           '.jpg?src=' + original_src)
            article = article.replace(src, replace_src)
        # be polite to the origin server between article fetches
        time.sleep(5)
        print(article)





def job():
    """Scheduled daily task: reset the article counter and log the reset."""
    ArticleCounter.counter = 0
    print(ArticleCounter.counter)

def main():
    """Entry point: start the daily counter-reset scheduler, then crawl the
    aggregated articles for every currently trending keyword."""
    scheduler = BackgroundScheduler()
    # reset the article counter to zero once per day
    scheduler.add_job(job, 'interval', days=1)
    scheduler.start()
    try:
        for hot_word in getHotWords():
            data = searchHotWord(hot_word)
            getHotContent(data, hot_word)
    finally:
        # fixed: the scheduler thread was never stopped; shut it down so the
        # process can exit cleanly once the crawl finishes or errors out
        scheduler.shutdown(wait=False)


# Run the crawler only when executed as a script, not on import.
if __name__ == '__main__':
    main()