# Part 1: setup

# Target WeChat official-account name; substituted into every output filename.
公众号 = "卷宗Wallpaper"

# Output file templates — each carries a {公众号} placeholder filled at save time.
fn = {
    "output": {
        "公众号_htm_snippets": "data_raw_src/公众号_htm_snippets_{公众号}.tsv",
        "公众号_df": "data_raw_src/公众号_df_{公众号}.tsv",
        "公众号_xlsx": "data_sets/公众号_url_{公众号}.xlsx",
    },
}

import pandas as pd
import numpy as np
from lxml.html import fromstring
import time
from random import random
from requests_html import HTMLSession

from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities

# Configure Chrome and open the WeChat official-account platform login page.
opts = webdriver.ChromeOptions()
opts.add_argument('--no-sandbox')           # avoids the "DevToolsActivePort file doesn't exist" error
opts.add_argument('window-size=1920x3000')  # fixed viewport so page layout (and the absolute XPaths below) stay stable
opts.add_argument('--disable-gpu')          # workaround recommended by the Chrome docs
opts.add_argument('--hide-scrollbars')      # hide scrollbars for pages that style them oddly

# FIX: `chrome_options=` is deprecated and removed in Selenium 4;
# `options=` is accepted from Selenium 3.8 onward, so this stays compatible.
driver = webdriver.Chrome(options=opts)


driver.get("https://mp.weixin.qq.com")


# Part 2: automated login (a QR-code scan is still required afterwards)

payload = {"account": "XXXXXX", "password": "XXXXXX"}  # replace with your own credentials

# Switch from QR-scan login to account/password login.
driver.find_element_by_xpath('//div[@class="login__type__container login__type__container__scan"]/a').click()

# For each credential field on the login form: clear it, then type the value.
for field in ("account", "password"):
    locator = '//form[@class="login_form"]//input[@name="%s"]' % field
    driver.find_element_by_xpath(locator).clear()
    driver.find_element_by_xpath(locator).send_keys(payload[field])

# Submit the form.
driver.find_element_by_xpath('//div[@class="login_btn_panel"]/a').click()


# Part 3: navigate the editor menus to reach the in-article link search

## Expand the left-hand menu.
element = driver.find_element_by_xpath('//a[@id="m_open"]')
element.click()
## Click "image-text material" (图文素材).
element = driver.find_element_by_xpath('/html/body/div[4]/div[2]/ul/li[2]/ul/li[1]/a')
element.click()
## Click the "+" button.
element = driver.find_element_by_xpath('//*[@id="js_main"]/div[3]/div[2]/div/div/div/div[1]/div/div[1]/div[1]/i')
element.click()
## "New image-text message" — NOTE(review): this appears to open the editor in a
## second browser window (the window_handles switch below relies on that).
element = driver.find_element_by_xpath('//*[@id="js_main"]/div[3]/div[2]/div/div/div/div[1]/div/div[1]/div[2]/ul/li[1]/a')
element.click()
# Inspect the open window handles.
print (driver.window_handles)
# FIX: switch_to_window() was deprecated and removed in Selenium 4;
# switch_to.window() is the supported API (available in Selenium 3 as well).
driver.switch_to.window(driver.window_handles[1])
## Click "insert hyperlink".
element = driver.find_element_by_xpath('//div[@class="mp-head"]//div[@class="media_list_box_inner"]/ul[@class="tpl_list"]/li[@id="js_editor_insertlink"]')
element.click()
## Click "choose another official account".
element = driver.find_element_by_xpath('//*[@id="vue_app"]/div[2]/div[1]/div/div[2]/div[2]/form[1]/div[3]/div/div/p/div/button')
element.click()
## Clear the search box, then type the target account name.
driver.find_element_by_xpath('//*[@id="vue_app"]/div[2]/div[1]/div/div[2]/div[2]/form[1]/div[3]/div/div/div/div/span/input').clear()
driver.find_element_by_xpath('//*[@id="vue_app"]/div[2]/div[1]/div/div[2]/div[2]/form[1]/div[3]/div/div/div/div/span/input').send_keys(公众号)
# Click the magnifier icon to run the search.
element = driver.find_element_by_xpath('/html/body/div[2]/div/div/div/div/div[6]/div[2]/div[1]/div/div[2]/div[2]/form[1]/div[3]/div/div/div/div/span/span/button[2]')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()

# Grab the raw HTML of the account search-result list for offline parsing.
element = driver.find_element_by_xpath('//ul[@class="inner_link_account_list"]')
main_content = element.get_attribute('innerHTML')
print(main_content)
公众号SERP = main_content

# Parse the search-result HTML into a DataFrame of candidate accounts.
root = fromstring(公众号SERP)

主 = root.xpath('//li[@class="inner_link_account_item"]')

def _extract_account(node):
    """Pull nickname, wechat id and avatar URL out of one result <li> node."""
    return {
        "nickname": node.xpath('./div/strong[@class="inner_link_account_nickname"]')[0].text,
        "wechat": node.xpath('./div/i[@class="inner_link_account_wechat"]')[0].text,
        "img": node.xpath('./div/img/@src')[0],
    }

account_list = [_extract_account(e) for e in 主]

df_account = pd.DataFrame(account_list)


# Part 4: collect the account's article links, page by page

# Click the first account in the search results to list its articles.
element = driver.find_element_by_xpath('//ul[@class="inner_link_account_list"]/li')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()

# Read the pagination widget to learn how many result pages exist.
l_e = driver.find_elements_by_xpath('//label[@class="weui-desktop-pagination__num"]')
l_e_int  = [int(x.text) for x in l_e]
print (l_e_int)
print (l_e_int[0]==l_e_int[-1])  # True when there is only a single page

pages = list(range(1,l_e_int[-1]+1 ))
print(pages)

# Fetch at most the first 50 pages.
# FIX: the original hard-coded the literal list [1..50], which requests pages
# that do not exist when the account has fewer than 50 result pages; capping
# at the real page count is identical when total >= 50 and correct otherwise.
pages = list(range(1, min(l_e_int[-1], 50) + 1))

html_raw = dict()   # page number -> raw innerHTML of that page's article list
main_content =""
element = None

def process_pages (pages):
    """Visit each result page via the pagination jump box and store the raw
    innerHTML of its article list into the module-level ``html_raw`` dict,
    keyed by page number. Relies on the module-level ``driver``."""
    for page_no in pages:
        print (page_no,end='\t')

        jump_box = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/input')
        jump_btn = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/a')
        jump_box.clear()
        jump_box.send_keys(page_no)
        jump_btn.click()

        # Long randomized pause (45-165 s) to avoid tripping rate limiting.
        time.sleep(45+120*random())

        listing = driver.find_element_by_xpath('//div[@class="inner_link_article_list"]')
        html_raw[page_no] = listing.get_attribute('innerHTML')

process_pages(pages)

# One row per fetched page; the raw HTML snippet is the single column.
df = pd.DataFrame([html_raw]).T
df.columns = ["html_snippets"]

# Persist the raw HTML: IPython %store plus a pickle on disk.
get_ipython().run_line_magic('store', 'html_raw')
import pickle
# FIX: the original opened the file and never closed it; `with` guarantees
# the handle is flushed and closed even if pickling raises.
with open("html_raw", 'wb') as filehandler:
    pickle.dump(html_raw, filehandler)

# Drop pages whose HTML duplicates an earlier page (the page jump sometimes
# silently stays on the same page), and record which pages need a retry.
df_out = df[~df.duplicated()]
print (len(df_out))
df[df.duplicated()]  # notebook display only; no effect when run as a script

try_again = list(df[df.duplicated()].index)
print(try_again)
# Pages missing from df entirely (never fetched) also need retrying.
try_again = try_again + list (set(pages).difference(set(df.index.values)))
try_again  # notebook display only

# Save the deduplicated snippets using the filename template from `fn`.
filename = fn ["output"] ["公众号_htm_snippets"]
df_out.to_csv(filename.format(公众号=公众号), sep="\t", encoding="utf8")


def get_content(link):
    """Fetch one article URL and return its visible body text.

    The article body lives under ``#js_content``; text is collected from both
    <span> and <p> descendants (span text first, then p text, concatenated).

    FIX: the original created an HTMLSession per call and never closed it,
    leaking sockets across the hundreds of calls this script makes; the
    try/finally guarantees the session is closed even if the request fails.
    """
    session = HTMLSession()
    try:
        r = session.get(url=link)
        content_xpath_1 = '//*[@id="js_content"]//span/text()'
        content_xpath_2 = '//*[@id="js_content"]//p/text()'
        content_1 = ''.join(r.html.xpath(content_xpath_1))
        content_2 = ''.join(r.html.xpath(content_xpath_2))
        return content_1 + content_2
    finally:
        # HTMLSession wraps requests.Session, so close() releases its pool.
        session.close()

def parse_html_snippets(_snippet_):
    """Parse one page's article-list HTML into a DataFrame with columns
    title / create_time / link / content_text.

    ``content_text`` is fetched over HTTP per link via ``get_content``, so
    this is the slow step. NOTE(review): the four lists must have equal
    lengths or the DataFrame constructor raises — this holds when every
    article <li> carries exactly one title, date and href; confirm for
    malformed snippets.
    """
    root = fromstring(_snippet_)
    title = [x.text for x in root.xpath('//div[@class="inner_link_article_title"]/span[2]')]
    create_time = [x.text for x in root.xpath('//div[@class="inner_link_article_date"]')]
    # xpath('//a/@href') already returns a list of strings; the original's
    # `[x for x in ...]` wrapper was a redundant copy.
    link = root.xpath('//a/@href')
    content_text = [get_content(x) for x in link]
    return pd.DataFrame({"title": title, "create_time": create_time,
                         "link": link, "content_text": content_text})
    
# Parse every fetched page and concatenate into one article-URL table.
l_df = []
for p in pages:
    _df_ = parse_html_snippets(df.loc[p,"html_snippets"])
    print (len(_df_), end=",")
    l_df.append(_df_)

df_url_out = pd.concat(l_df).reset_index(drop=True)

# Save the table.
# FIX: the original hard-coded 'data_out/{公众号}公众号链接及文章.xlsx', ignoring
# the output filename declared in the `fn` config at the top of the file
# (which IS used for the TSV output above); use the declared template so all
# outputs are governed by one config dict.
with pd.ExcelWriter(fn["output"]["公众号_xlsx"].format(公众号=公众号),
                    mode='w', engine="openpyxl") as writer:
    df_url_out.to_excel(writer, sheet_name=公众号)