from lxml import etree
import requests
from time import sleep
import pandas as pd
import re
# 作者微信登不了网页版，因此在客户端打开公众号历史界面直接保存源码至txt再提取解析

def f_url(url):
    """Strip the fixed 25-character tracking suffix from an article URL."""
    trimmed_length = len(url) - 25
    return url[:max(trimmed_length, 0)]


def clean_url(file):
    """Parse the saved history-page HTML and map publish dates to article URLs.

    Reads *file* (UTF-8 HTML saved from the WeChat client), extracts one
    entry per message card, and returns ``{publish date: [clean urls]}``.
    Cards that never loaded in the client are patched in with hard-coded
    URL lists; those assignments come last so they override parsed ones.
    """
    with open(file, 'r', encoding='utf8') as f:
        tree = etree.HTML(f.read())

    dic = {}
    for card in tree.xpath('//div[@class="weui_msg_card js_card"]'):
        pub_date = card.xpath('./div[1]/text()')[0]     # publish date of this card
        raw_urls = card.xpath('./div[2]/div/@hrefs')    # NOTE: attribute really is "hrefs" on this page
        dic[pub_date] = [f_url(u) for u in raw_urls]

    # Fallback URLs for the cards that failed to load in the client.
    dic['2019年7月25日'] = ['https://mp.weixin.qq.com/s/ZvjbG_1LUhrVE8OTstDY4A',
                           'https://mp.weixin.qq.com/s/5fmKUEYRt5UfSUnpLXzd3w',
                           'https://mp.weixin.qq.com/s/6qD-lxUTvCRFj2urQHIhRg',
                           'https://mp.weixin.qq.com/s/7YYABNvM3c3vElzB6dYz1w',
                           'https://mp.weixin.qq.com/s/e_8O-hTm980xYtUM34wj4Q',
                           'https://mp.weixin.qq.com/s/k-jr2CJsUEEjxytBueZoyA',
                           'https://mp.weixin.qq.com/s/B_-6WeqmtykL3fAjhu6eLA',
                           'https://mp.weixin.qq.com/s/pT1s0jIBM-2mv6PiKI2W8w']
    dic['2019年7月18日'] = ['https://mp.weixin.qq.com/s/1LJCA2G4owyRaOShuzbYkQ',
                           'https://mp.weixin.qq.com/s/xKS4BJg9bZDOTUCvGzkPKw',
                           'https://mp.weixin.qq.com/s/ff9q5NtHS0pGke95_D5tDw',
                           'https://mp.weixin.qq.com/s/CKrot9rtIazlN3Z9dPzKOQ',
                           'https://mp.weixin.qq.com/s/CHeg1V6McDz_FwtJsPf7Rg',
                           'https://mp.weixin.qq.com/s/oB6cenWrtDLFlFW-YK40nA',
                           'https://mp.weixin.qq.com/s/vWiBoFFo6Ziftv3cCxqdbw',
                           'https://mp.weixin.qq.com/s/Mkx1fXHcHSsifySub7t6iA']
    dic['2019年6月14日'] = ['https://mp.weixin.qq.com/s/NKAATvxNgfysd0Xlff-iDQ',
                           'https://mp.weixin.qq.com/s/MrYiVhNdHkR7PjqHadkNAw']
    return dic


def f_price(s):
    """Keep *s* only when it mentions a flash sale ('限时抢购'); else return ''."""
    return s if '限时抢购' in s else ''


def f_choose_section(xml):
    """Return the node's joined text if it mentions a sale date, else ''.

    *xml* is an element exposing ``.xpath``; its descendant text is joined
    and kept only when it contains '抢购日期' or '购买日期'.
    """
    joined = ''.join(xml.xpath('.//text()'))
    has_date = ('抢购日期' in joined) or ('购买日期' in joined)
    return joined if has_date else ''


class Spider:
    """Fetch each saved article URL and scrape its flash-sale (抢购) details.

    Parsed records are appended as dicts to the module-level ``info_ls``
    list, which the ``__main__`` block creates before calling ``req1``.
    """

    def __init__(self):
        # {publish date: [article urls]} parsed from the saved history page.
        self.dic = clean_url('公众号历史网页源代码.txt')
        # BUG FIX: the HTTP header name is 'User-Agent'; the original key
        # 'UserAgent' is not a real header, so the UA string was never sent.
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.16 Safari/537.36'}

    def req1(self):
        """Download every article and extract name / price / note fields."""
        for i in self.dic:
            for u in self.dic[i]:
                info = {'时间': i}
                # timeout added so a dead connection cannot hang the crawl forever
                response = requests.get(u, headers=self.headers, timeout=15)
                print(response)
                html = response.text
                xml = etree.HTML(html)
                # Candidate <section> nodes of the article body (the trailing
                # space inside the class attribute is present in the page HTML).
                parent_xml = xml.xpath('//div[@class="rich_media_content "]/section')
                result = ''
                # Index of the first section mentioning a sale date; 0 doubles
                # as "not found" (prepending before index 0 is impossible anyway).
                ind = 0
                for child_xml in parent_xml[::-1]:
                    text = ''.join(child_xml.xpath('.//text()'))
                    if '抢购日期' in text or '购买日期' in text:
                        result += text
                        ind = parent_xml.index(child_xml)
                if ind:
                    # The section just before the first match usually carries
                    # the product name, so prepend its text.
                    result = ''.join(parent_xml[ind-1].xpath('.//text()')) + result
                print(result)
                name = ''.join(re.findall('▣(.*?)抢购', result))
                name = re.sub(r'\s', '', name)
                info['抢购名称'] = name
                flag = re.findall('抢购价：(.*?)抢购日期', result)
                price = ''.join(flag)
                if not flag:
                    # Some articles say 购买日期 instead of 抢购日期.
                    price = ''.join(re.findall('抢购价：(.*?)购买日期', result))
                price = re.sub(r'\s', '', price)
                info['抢购价'] = price
                # BUG FIX: group the alternation. The original pattern
                # '抢购日期：|购买日期：(.*)' captured only after '购买日期：';
                # a match on '抢购日期：' produced an empty group, losing the note.
                introduce = ''.join(re.findall('(?:抢购日期|购买日期)：(.*)', result))
                info['备注'] = introduce
                print(name, price, introduce)
                info_ls.append(info)
                sleep(.5)   # be polite to the server between requests


if __name__ == "__main__":
    # info_ls must be module-global: Spider.req1 appends one record per article.
    info_ls = []
    crawler = Spider()
    crawler.req1()
    print('数据量=', len(info_ls))
    pd.DataFrame(info_ls).to_excel('数据.xlsx')
