#!/usr/bin/env python
# coding: utf-8

# <center><font size = "6">南方都市报微信公众号内容</font></center>  

# # 数据挖掘 — 南方都市报微信公众号内容抓取
# ## 项目要求 
# - 使用selenium进入微信公众平台
# - 在微信公众平台寻找指定的公众号
# - 抓取该公众号指定时间区间的文章（不低于50页数据/不低于1年的数据）
# - 导出文章信息（应包含标题，时间，文章url链接以及文章文本内容）

# # 准备工作

# In[54]:


# 导入所需模块
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import pandas as pd
import numpy as np
from lxml.html import fromstring
import time
from random import random
from requests_html import HTMLSession

#caps=dict()
#caps["pageLoadStrategy"] = "none"   # Do not wait for full page load

# Chrome launch options for the scraping session.
opts = webdriver.ChromeOptions()
opts.add_argument('--no-sandbox')  # works around the "DevToolsActivePort file doesn't exist" error
opts.add_argument('window-size=1920x3000')  # fix the browser viewport size
opts.add_argument('--disable-gpu')  # Google docs recommend this flag to work around a bug
opts.add_argument('--hide-scrollbars')  # hide scrollbars for pages that render them oddly
#opts.add_argument('blink-settings=imagesEnabled=false') # skip image loading, for speed
#opts.add_argument('--headless') # headless mode; required on Linux hosts without a display
# opts.binary_location = "C:\portable\PortableApps\IronPortable\App\Iron\chrome.exe"
# opts.binary_location = "C:\Program Files\Google\Chrome\Application\chromedriver.exe" #"H:\_coding_\Gitee\InternetNewMedia\CapstonePrj2016\chromedriver.exe"  


# NOTE(review): `chrome_options=` is deprecated (removed in Selenium 4) — prefer
# `options=opts`; confirm which Selenium version this environment runs.
driver = webdriver.Chrome( chrome_options = opts) #desired_capabilities=caps,


# In[53]:


# Target official-account name — the single scrape parameter.
公众号 = "南方都市报"

# Output locations; each "{公众号}" placeholder is filled via str.format at export time.
fn = {
    "output": {
        "公众号_htm_snippets": "data_raw_src_/公众号_htm_snippets_{公众号}.tsv",
        "公众号_df": "data_raw_src_/公众号_df_{公众号}.tsv",
        "公众号_xlsx": "公众号_url_{公众号}.xlsx",
    }
}


# In[55]:


# Open the WeChat Official Accounts platform login page.
driver.get("https://mp.weixin.qq.com")


# # 自动化登录 — 需扫码操作

# In[57]:


# Login credentials — replace the placeholder strings with real values before running.
payload = dict(account="请输入您的账号", password="请输入您的密码")


# In[56]:


# Click "log in with account & password" instead of the QR-code default.
element = driver.find_element_by_xpath('//a[@class="login__type__container__select-type"]')
# Don't click() blindly — inspect innerHTML first to confirm the XPath hit the right element.
element.get_attribute('innerHTML')
element.click()


# In[58]:


# Fill in the account name — clear() first in case the input box already has content.
element = driver.find_element_by_xpath('//input[@name="account"]')
element.get_attribute('innerHTML')
element.clear()
element.send_keys(payload['account'])


# In[59]:


# Fill in the password.
element = driver.find_element_by_xpath('//input[@name="password"]')
element.get_attribute('innerHTML')
element.clear()
element.send_keys(payload['password'])


# In[60]:


# Submit the login form (a QR-scan confirmation on a phone may still be required).
element = driver.find_element_by_xpath('//a[@class="btn_login"]')
element.get_attribute('innerHTML')
element.click()


# # 寻找选单

# In[61]:


# Expand the left-hand sidebar of the account admin page.
element = driver.find_element_by_xpath('//a[@id="m_open"]')
element.click()


# In[63]:


# Click the "image & text material" menu entry.
# NOTE(review): absolute XPath is brittle — it breaks whenever the page layout changes.
element = driver.find_element_by_xpath('/html/body/div[4]/div[2]/ul/li[2]/ul/li[1]/a') 
element.click()


# In[64]:


# Click the "+" card to start a new creation.
element = driver.find_element_by_xpath('//i[@class="weui-desktop-card__icon-add"]')
element.click()


# In[65]:


# "Write new article" — opens the editor in a new browser window/tab.
element = driver.find_element_by_xpath('//a//i[@class="icon-svg-editor-appmsg"]') 
element.click()


# # 窗口信息检查 — 并定位在当前窗口下进行操作

# In[66]:


# The editor opened in a second window — check the handle list (should be >1),
# then target all further driver calls at the new window.
driver.window_handles


# In[67]:


# Switch to the newly opened editor window.
# Fixed: driver.switch_to_window() is deprecated and removed in Selenium 4;
# driver.switch_to.window() is the supported API and works in Selenium 3 as well.
driver.switch_to.window(driver.window_handles[1])


# In[68]:


# Open the "insert hyperlink" dialog in the editor.
element = driver.find_element_by_xpath('//li[@id="js_editor_insertlink"]') 
element.click()


# In[69]:


# Click "choose another official account" so we can search arbitrary accounts.
element = driver.find_element_by_xpath('//button[@class="weui-desktop-btn weui-desktop-btn_default"]')
element.click()


# In[70]:


# Type the target account name into the search box; clear() first to be safe.
element = driver.find_element_by_xpath('//input[@placeholder="输入文章来源的公众号名称或微信号，回车进行搜索"]')
element.get_attribute('innerHTML')
element.clear()
element.send_keys(公众号)


# In[71]:


# Click the magnifier button to run the search.
element = driver.find_element_by_xpath('//button[@class="weui-desktop-icon-btn weui-desktop-search__btn"]')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[72]:


# Grab the raw HTML of the search-result account list for offline parsing.
element = driver.find_element_by_xpath('//ul[@class="inner_link_account_list"]')
main_content = element.get_attribute('innerHTML')
print(main_content)
公众号SERP = main_content


# In[73]:


# Parse the captured search-result HTML.
# NOTE: pandas and lxml were already imported at the top of the file;
# these re-imports are harmless notebook leftovers.
import pandas as pd
from lxml.html import fromstring
root = fromstring(公众号SERP) 


# In[74]:


# Each <li class="inner_link_account_item"> is one account hit in the search results.
主 = root.xpath('//li[@class="inner_link_account_item"]')

# For every hit, extract nickname, WeChat id and avatar URL.
account_list = [
    {
        "nickname": e.xpath('./div/strong[@class="inner_link_account_nickname"]')[0].text,
        "wechat": e.xpath('./div/i[@class="inner_link_account_wechat"]')[0].text,
        "img": e.xpath('./div/img/@src')[0],
    }
    for e in 主
]

df_account = pd.DataFrame(account_list)
df_account


# # 获取公众号文章链接和正文

# In[75]:


# Click the first (top) account in the result list to open its article archive.
element = driver.find_element_by_xpath('//ul[@class="inner_link_account_list"]/li')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[79]:


# Page-jump scratch test — disabled (kept as a bare string literal, so it never runs).
# NOTE(review): `find_element_by_xpaht` on the third line is a typo for
# `find_element_by_xpath`; fix it before re-enabling this snippet.
'''
跳转_input = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/input')
跳转_a = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/a')
跳转_title = driver.find_element_by_xpaht('//div[@class="inner_link_article_title"]//span//text()')
跳转_input.clear()
跳转_input.send_keys(2)
跳转_a.click()
'''


# In[82]:


# Read the pagination labels to find the highest reachable page number.
l_e = driver.find_elements_by_xpath('//label[@class="weui-desktop-pagination__num"]')
l_e_int  = [int(x.text) for x in l_e] 
print (l_e_int)
print (l_e_int[0]==l_e_int[-1])


# In[83]:


# Build the page range; the second assignment (starting from page 1) is the one that sticks.
pages = list(range(l_e_int[0],l_e_int[-1]+1 ))
#print(pages[0:2])
pages = list(range(1,l_e_int[-1]+1 ))
print(pages)


# In[84]:


# Scrape the first 60 result pages.
pages = list(range(1, 61))


# In[85]:


# Module-level state shared with process_pages(): raw HTML per page,
# plus scratch holders for the current element/content.
html_raw = {}
main_content = ""
element = None


# In[86]:


def process_pages (pages):
    """Visit each page number via the pagination jump box and stash that
    page's article-list HTML into the module-level dict ``html_raw``
    (keyed by page number)."""
    for page_no in pages:
        print (page_no,end='\t')

        # Locate the "jump to page" input box and its confirm link, then jump.
        jump_box = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/input')
        jump_go = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/a')
        jump_box.clear()
        jump_box.send_keys(page_no)
        jump_go.click()

        # Long randomized pause (45–165 s) to stay under anti-crawler rate limits.
        time.sleep(45+120*random())

        # Capture the rendered article list for offline parsing.
        listing = driver.find_element_by_xpath('//div[@class="inner_link_article_list"]')
        html_raw[page_no] = listing.get_attribute('innerHTML')


# In[87]:


# Crawl all 60 pages (slow by design — expect one to three minutes per page).
process_pages (pages)


# In[88]:


# One row per crawled page: index = page number, single column of raw HTML snippets.
df = pd.DataFrame.from_dict(html_raw, orient="index")
df.columns = ["html_snippets"]
df.loc[0:1]


# In[89]:


# Persist the raw HTML dict: %store for IPython sessions, pickle for plain Python.
get_ipython().run_line_magic('store', 'html_raw')
import pickle 
# Fixed: use a context manager so the file handle is flushed and closed —
# the original open()/dump() left the handle dangling.
with open("html_raw", 'wb') as filehandler:
    pickle.dump(html_raw, filehandler)


# In[90]:


# duplicated() compares all columns by default; keep only the first occurrence
# of each identical page snapshot, and display the rows that were dropped.
dup_mask = df.duplicated()
df_out = df[~dup_mask]
print (len(df_out))
df[dup_mask]


# In[91]:


# Pages to re-crawl: duplicated snapshots plus any page numbers missing from the index.
try_again = list(df[df.duplicated()].index)
print(try_again)
missing_pages = set(pages) - set(df.index.values)
try_again = try_again + list(missing_pages)
try_again


# In[92]:


# Checkpoint: dump the de-duplicated raw snippets to a TSV file.
filename = fn ["output"] ["公众号_htm_snippets"] 
df_out.to_csv(filename.format(公众号=公众号), sep="\t", encoding="utf8")


# In[94]:


def get_content(link):
    """Fetch one article page and return its visible text.

    Concatenates the text of all <span> nodes, then all <p> nodes, under the
    #js_content container — note the two groups are NOT interleaved in
    document order.
    """
    # Fixed: close the HTTP session when done — the original leaked the
    # session (and its underlying connection pool) on every call.
    session = HTMLSession()
    try:
        r = session.get(url=link)
        content_xpath_1 = '//*[@id="js_content"]//span/text()'
        content_xpath_2 = '//*[@id="js_content"]//p/text()'
        content_1 = ''.join(r.html.xpath(content_xpath_1))
        content_2 = ''.join(r.html.xpath(content_xpath_2))
        return content_1 + content_2
    finally:
        session.close()

def parse_html_snippets(_snippet_):
    """Parse one page's article-list HTML into a DataFrame with columns
    title / create_time / link / content_text (content fetched per link
    via get_content)."""
    tree = fromstring(_snippet_)
    titles = [node.text for node in tree.xpath('//div[@class="inner_link_article_title"]//span[2]')]
    dates = [node.text for node in tree.xpath('//div[@class="inner_link_article_date"]')]
    urls = list(tree.xpath('//a/@href'))
    texts = [get_content(u) for u in urls]
    return pd.DataFrame({"title": titles, "create_time": dates, "link": urls, "content_text": texts})
    
# Parse every page snapshot; print each page's article count as a progress trace.
l_df = []
for page_no in pages:
    parsed = parse_html_snippets(df.loc[page_no,"html_snippets"])
    print (len(parsed), end=",")
    l_df.append(parsed)


# In[95]:


# Stitch all per-page tables into one table of articles, with a fresh 0..n index.
df_url_out = pd.concat(l_df).reset_index(drop=True)
df_url_out


# In[96]:


# Spot-check that the first article URL looks right.
df_url_out.loc[0].link


# In[97]:


# Show duplicated article rows.
df_url_out[df_url_out.duplicated()]


# In[98]:


# The remaining, de-duplicated rows.
df_url_out[~df_url_out.duplicated()]


# In[99]:


# Final export: write all article rows to an Excel workbook (requires openpyxl).
with pd.ExcelWriter('{公众号}公众号链接及文章内容.xlsx'.format(公众号=公众号),mode='w',engine="openpyxl") as writer:  
            df_url_out.to_excel(writer, sheet_name=公众号)

