#!/usr/bin/env python
# coding: utf-8

# # 采集公众号

# In[13]:


'''
阿里研究院
阿里健康
阿里巴巴商学院
阿里数据

腾讯金融科技
腾讯研究院
腾讯媒体研究院
腾讯云启研究院
酷鹅用户研究院
南方都市报
36氪Pro
湛江日报
'''
# Target WeChat official account to crawl; the candidates above are accounts
# previously scraped with this notebook.  Pick one name and assign it here.
公众号 = "36氪Pro"


# In[14]:


# Output path templates; each expects .format(公众号=...) before use.
fn = {
    "output": {
        "公众号_htm_snippets": "data_raw_src/公众号_htm_snippets_{公众号}.tsv",
        "公众号_df": "data_raw_src/公众号_df_{公众号}.tsv",
        "公众号_xlsx": "data_sets/公众号_url_{公众号}.xlsx",
    },
}


# In[15]:


import time
from random import random

import numpy as np
import pandas as pd
from lxml.html import fromstring
from selenium.webdriver.common.by import By

# when selenium main_content is used
# Parses an HTML document from a string constant.  Returns the root nood
# root = fromstring(df.loc[1,"html_snippets"]) 


# ## 使用Selenium
# * 要更改 opts.binary_location 至自己本地的Chrome浏览器，建议portable
# * Chrome浏览器 和 chromedriver.exe要同版本号到小数后一位
# * 要确保可以 开启浏览器机器人
# * 要确保浏览器机器人 可以打开网页 driver.get("https://mp.weixin.qq.com")

# In[16]:


# Sanity-check that selenium is importable.  Print the version instead of
# help(selenium), which dumped the entire package help text into the output.
import selenium
print(getattr(selenium, "__version__", "unknown"))


# In[17]:


# coding=utf-8
# Smoke test: drive a throwaway Chrome session through a Baidu search to
# confirm browser automation works before touching the real target site.
# Selenium 4 removed the find_element_by_* helpers; use find_element(By.ID, ...).
from selenium import webdriver
from selenium.webdriver.common.by import By
import time

wd = webdriver.Chrome()
wd.get("https://www.baidu.com")                      # open Baidu
wd.find_element(By.ID, "kw").send_keys("selenium")   # locate the search box and type the query
wd.find_element(By.ID, "su").click()                 # click the "百度一下" search button
time.sleep(3)                                        # crude wait for results to render
wd.quit()                                            # close the browser


# In[18]:


from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


#caps=dict()
#caps["pageLoadStrategy"] = "none"   # Do not wait for full page load

# Chrome launch options for the scraping session.
opts = webdriver.ChromeOptions()
opts.add_argument('--no-sandbox')          # avoids the "DevToolsActivePort file doesn't exist" error
opts.add_argument('window-size=1920x3000') # fixed, tall browser viewport
opts.add_argument('--disable-gpu')         # Google docs suggest this to work around a rendering bug
opts.add_argument('--hide-scrollbars')     # hide scrollbars for odd pages
#opts.add_argument('blink-settings=imagesEnabled=false')  # skip loading images, for speed
#opts.add_argument('--headless')  # headless mode; needed on Linux hosts without a display
# opts.binary_location = r"C:\portable\PortableApps\IronPortable\App\Iron\chrome.exe"
# NOTE(review): binary_location must point at the Chrome *browser* executable,
# not chromedriver.exe, and the driver must match the browser's major.minor version.

# BUG fix: Selenium 4 removed the chrome_options= keyword; pass options= instead.
driver = webdriver.Chrome(options=opts)  # desired_capabilities=caps,


# In[19]:


# Open the WeChat Official Account platform login page.
driver.get("https://mp.weixin.qq.com")


# ## 填表登入

# In[20]:


# SECURITY(review): account credentials are hard-coded in plain text.  Move
# them to environment variables or a git-ignored config file before sharing.
payload =  {"account": "caimingli20000307@163.com", "password": "440882Abc!"}
# payload =  {"account": "NFUHacks@163.com", "password": "NFU706947580"}
# Switch from QR-scan login to account/password login.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
driver.find_element(By.XPATH, '//div[@class="login__type__container login__type__container__scan"]/a').click()


# In[21]:


# Fill the login form.  Locate each input once instead of re-querying the DOM
# for every action, and use the Selenium 4 find_element(By.XPATH, ...) API.
account_input = driver.find_element(By.XPATH, '//form[@class="login_form"]//input[@name="account"]')
account_input.clear()
account_input.send_keys(payload['account'])

password_input = driver.find_element(By.XPATH, '//form[@class="login_form"]//input[@name="password"]')
password_input.clear()
password_input.send_keys(payload['password'])


# In[22]:


# Submit the login form.
driver.find_element(By.XPATH, '//div[@class="login_btn_panel"]/a').click()


# ## 点选单

# In[40]:


# Expand the main menu (Selenium 4: find_element(By.XPATH, ...)).
element = driver.find_element(By.XPATH, '//a[@id="m_open"]')
element.click()
main_content = element.get_attribute('innerHTML')
main_content


# In[41]:


# Scroll to the bottom of the page so lazily-rendered menu items load.
driver.execute_script("window.scrollTo(0,document.body.scrollHeight)")


# In[42]:


# Find the "图文素材" (article material) menu entry and grab its target URL.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
element = driver.find_element(By.XPATH, '//li[@title[contains(.,"图文素材")]]/a')
# main_content = element.get_attribute('innerHTML')
# main_content
url2 = element.get_attribute("href")
url2


# In[43]:


# Navigate to the article-material page.
driver.get(url2)


# ## 新建图文消息

# In[44]:


# Click "新的创作" (new creation) to open the article editor.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
element = driver.find_element(By.XPATH, '//*[text()[contains(.,"新的创作")]]')
main_content = element.get_attribute('innerHTML')
main_content
element.click()


# In[47]:


# The editor opened in a new browser window; inspect all window handles.
print (driver.window_handles)


# In[48]:


# The new-article editor opened in another window, so we must switch_to it;
# the newest window is the last handle in the list.
driver.switch_to.window(driver.window_handles[-1])


# In[49]:


# Confirm which window the driver is now controlling.
driver.current_window_handle


# ## 超链接

# In[50]:


# Open the "超链接" (hyperlink) dialog from the editor toolbar.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
element = driver.find_element(By.XPATH, '//*[text()[contains(.,"超链接")]]')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[51]:


# Click "选择其他公众号" (choose another official account) in the dialog.
element = driver.find_element(By.XPATH, '//*[text()[contains(.,"选择其他公众号")]]')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[58]:


# Type the target account name into the account search box.  Locate the input
# once instead of querying the DOM twice (Selenium 4 By.XPATH API).
search_input = driver.find_element(
    By.XPATH,
    '//form//div[@class="inner_link_account_area"]//input[@class="weui-desktop-form__input"]')
search_input.clear()
search_input.send_keys(公众号)


# In[61]:


# Click the magnifier button to run the account search.
element = driver.find_element(By.XPATH, '//button[@class="weui-desktop-icon-btn weui-desktop-search__btn"]')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[62]:


# Grab the raw HTML of the account search-results list.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
element = driver.find_element(By.XPATH, '//ul[@class="inner_link_account_list"]')
main_content = element.get_attribute('innerHTML')
print(main_content)
公众号SERP = main_content


# In[63]:


# Parse the results snippet into an lxml element tree.
root = fromstring(公众号SERP)


# In[64]:


# Extract (nickname, wechat id, avatar url) for every account hit in the SERP
# and collect them into a DataFrame.
account_nodes = root.xpath('//li[@class="inner_link_account_item"]')

account_list = [
    {
        "nickname": node.xpath('./div/strong[@class="inner_link_account_nickname"]')[0].text,
        "wechat": node.xpath('./div/i[@class="inner_link_account_wechat"]')[0].text,
        "img": node.xpath('./div/img/@src')[0],
    }
    for node in account_nodes
]

df_account = pd.DataFrame(account_list)


# In[65]:


df_account


# In[66]:


# Click the first account in the results list to select it.
# (Selenium 4: find_element(By.XPATH, ...) replaces find_element_by_xpath.)
element = driver.find_element(By.XPATH, '//ul[@class="inner_link_account_list"]/li')
main_content = element.get_attribute('innerHTML')
print(main_content)
element.click()


# In[67]:


# 跳转testing
# Scratch cell: a manual test of the pagination "jump to page" controls, kept
# as a no-op string literal so it never executes.  The live version of this
# logic is inside process_pages() below.
'''
跳转_input = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/input')
跳转_a = driver.find_element_by_xpath('//span[@class="weui-desktop-pagination__form"]/a')
跳转_input.clear()
跳转_input.send_keys(2)
跳转_a.click()
'''


# In[68]:


# 跳转上限
# Read the pagination number labels to discover the page range.
# (Selenium 4: find_elements(By.XPATH, ...) replaces find_elements_by_xpath.)
l_e = driver.find_elements(By.XPATH, '//label[@class="weui-desktop-pagination__num"]')
l_e_int  = [int(x.text) for x in l_e] 
print (l_e_int)
# True only when there is a single page (first label == last label).
print (l_e_int[0]==l_e_int[-1])


# In[69]:


# Full list of result pages to visit, 1..last.
# BUG fix: the original first built range(l_e_int[0], ...) and then
# immediately overwrote it with range(1, ...); the dead assignment is removed.
pages = list(range(1, l_e_int[-1] + 1))
print(pages)


# ## 循环/遍历

# In[70]:


# Global variables shared with process_pages():
#   content_02   - dict mapping page number -> raw innerHTML of that result page
#   main_content - scratch buffer for the last innerHTML read
#   element      - scratch buffer for the last located WebElement
content_02 = dict()
main_content =""
element = None


# In[71]:


def process_pages (pages):
    """Scrape the article-list HTML of each result page into ``content_02``.

    For every page number in *pages*: type it into the pagination jump box,
    click the jump link, sleep a random 0-10 s (to look less bot-like and to
    let the page load), then store the innerHTML of the article list in the
    global dict ``content_02`` keyed by page number.

    Uses the module-level ``driver`` and mutates ``content_02`` in place.
    """
    for p in pages:
        print (p, end='\t')

        # Re-locate the pagination controls every iteration: the jump reloads
        # the list, so cached element references would go stale.
        jump_input = driver.find_element(By.XPATH, '//span[@class="weui-desktop-pagination__form"]/input')
        jump_link = driver.find_element(By.XPATH, '//span[@class="weui-desktop-pagination__form"]/a')
        jump_input.clear()
        jump_input.send_keys(p)
        jump_link.click()

        # NOTE(review): a random sleep is not a reliable load wait; consider
        # WebDriverWait + expected_conditions if pages come back empty.
        time.sleep(10*random())

        article_list = driver.find_element(By.XPATH, '//div[@class="inner_link_article_list"]')
        content_02[p] = article_list.get_attribute('innerHTML')


# In[72]:


process_pages(pages)


# In[ ]:


# current progress
# BUG fix: the original evaluated a bare `p`, but `p` is local to
# process_pages() and never defined at module level, so the cell raised
# NameError.  Report which pages have been scraped instead.
print("pages scraped:", sorted(content_02))


# In[217]:


# Turn the scraped pages into a one-column DataFrame indexed by page number.
# BUG fix: the original read `content_01`, which is never defined in this
# file; the scraped HTML lives in `content_02`.
df = pd.DataFrame([content_02]).T
df.columns = ["html_snippets"]
df


# In[218]:


# Persist the scraped pages: via IPython's %store magic and as a pickle file.
get_ipython().run_line_magic('store', 'content_02')
import pickle
# BUG fix: the original pickled `html_raw`, which is never defined anywhere in
# this file; dump `content_02` instead.  The with-block also guarantees the
# file handle is closed (the original leaked it).
with open("content_02", 'wb') as filehandler:
    pickle.dump(content_02, filehandler)


# In[219]:


# Keep only the first occurrence of each scraped snippet; show the duplicates.
df_out = df.drop_duplicates()
print (len(df_out))
df[df.duplicated()]


# In[150]:


# Pages whose HTML came back duplicated, plus pages missing from df entirely,
# are candidates for re-scraping.
try_again = list(df[df.duplicated()].index)
print(try_again)
try_again = try_again + list(set(pages) - set(df.index.values))
try_again


# ## 暂存档

# In[151]:


# Write the raw HTML snippet cache as a tab-separated file, using the path
# template from the fn config at the top of the file.
filename = fn ["output"] ["公众号_htm_snippets"] 
df_out.to_csv(filename.format(公众号=公众号), sep="\t", encoding="utf8")


# In[153]:


def parse_html_snippets(_snippet_):
    """Parse one cached article-list HTML snippet into a DataFrame.

    Returns a DataFrame with columns ``title`` / ``create_time`` / ``link``.

    NOTE(review): ``//a/@href`` collects *every* anchor in the snippet; if the
    markup ever contains links outside the article rows, the three column
    lists will differ in length and the DataFrame constructor will raise.
    """
    root = fromstring(_snippet_)
    title = [x.text for x in root.xpath('//div[@class="inner_link_article_title"]')]
    create_time = [x.text for x in root.xpath('//div[@class="inner_link_article_date"]')]
    link = list(root.xpath('//a/@href'))
    return pd.DataFrame({"title": title, "create_time": create_time, "link": link})


# Parse every scraped page, reporting how many articles each yielded.
l_df = []
for p in pages:
    _df_ = parse_html_snippets(df.loc[p, "html_snippets"])
    print (len(_df_), end=",")
    l_df.append(_df_)


# In[154]:


# Stack the per-page DataFrames into one URL table with a fresh 0..n index.
df_url_out = pd.concat(l_df).reset_index(drop=True)
df_url_out.loc[0:10]


# In[155]:


# Peek at the tail to sanity-check the last scraped page.
df_url_out.tail(5)


# ## 输出

# In[444]:


# Label each DataFrame's column index; the label doubles as the Excel sheet
# name in the export cell below.
# NOTE(review): `df_o` and `df_stats` are never defined anywhere in this file
# (presumably created in another notebook/session) — running the file
# top-to-bottom raises NameError here.
df_account.columns.name = "rel_accounts"
df_o.columns.name = "url_cat"
df_stats.columns.name = "stats"


# In[445]:


# Show the last-assigned column-index label.
_df_.columns.name


# In[446]:


# Get the xlsxwriter workbook and worksheet objects.  
with pd.ExcelWriter(fn["output"]["公众号_xlsx"].format(公众号=公众号)) as writer:
    workbook  = writer.book

    for _df_ in [df_account, df_o, df_stats]:
        _df_.to_excel(writer, sheet_name = _df_.columns.name)


# In[ ]:


# BUG fix: this stray dict fragment (left over from editing the fn config at
# the top of the file) was a top-level SyntaxError when the file runs as a
# script; it is kept only as a comment:
# "公众号_xlsx": "data_sets/{公众号}_url.xlsx" }


# In[156]:


# Shut down the browser and end the Selenium session.
driver.quit()


# In[ ]:




