import requests, time, queue, json, os,random
from bs4 import BeautifulSoup
import pandas as pd
from pprint import pprint
from lxml import etree
import multiprocessing

import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header


from MongoDbHandler import MongoDbHandler

# Silence urllib3's InsecureRequestWarning: some requests below use verify=False.
requests.packages.urllib3.disable_warnings()

# t = time.strftime("%Y%m%d-%H%M", time.localtime())
ymd = time.strftime("%Y%m%d", time.localtime())  # today's date stamp, e.g. "20190928"
html_dir = "网页{}".format(ymd)  # per-day directory where raw pages/JSON responses are archived

def save_mongodb(j_dict):
    """Persist one run's result dict into MongoDB (db "jk", collection "huamei")."""
    # host / username / password of the local MongoDB instance
    session = MongoDbHandler('127.0.0.1', 'admin', 'admin')
    # insert_one(database_name, collection_name, document)
    outcome = session.insert_one("jk", "huamei", j_dict)
    print(outcome)

class Baidu:
    """Ranking checker for Baidu desktop search results."""

    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
        }
        self.url = "https://www.baidu.com/s"
        # Target names searched for inside result <h3> titles.
        self.hm = "华美顾问"
        self.hm_gg = "华美顾问"

    def page(self, kw="酒店顾问"):
        """Return the 1-based rank (str) of the first result title containing
        the target name within the first 3 pages, else "50+".

        Never raises: a failed request/parse for one keyword must not abort
        the whole worker process (consistent with the other platform classes).
        """
        pm = "50+"
        try:
            for j in range(3):
                pn = j * 10  # Baidu paging offset: 10 results per page
                data = {"wd": kw, "pn": pn}
                r = requests.get(self.url, params=data, headers=self.headers)
                con = r.text
                # Archive the raw page for later inspection.
                with open("{}/百度/百度_{}_{}.html".format(html_dir, kw, j + 1), "w", encoding="utf8") as f:
                    f.write(con)

                soup = BeautifulSoup(con, "lxml")
                # Guard: pages without a <title> (e.g. captcha pages) must not crash.
                print("百度电脑端", j + 1, "-", soup.title.string if soup.title else "")
                h3_li = soup.find_all("h3")

                for i, h3 in enumerate(h3_li):
                    h3_text = h3.text.strip()
                    if self.hm in h3_text or self.hm_gg in h3_text:
                        # First occurrence is the ranking; stop searching.
                        return str(pn + i + 1)
        except Exception as e:
            print("百度电脑端url失败--", kw)
            print(e)
        return pm

    def run(self, kw_li, re_dict):
        """Look up every keyword and publish {keyword: rank} under the platform key."""
        kw_dict = {}
        for kw in kw_li:
            kw_dict[kw] = self.page(kw)
        re_dict["百度电脑端"] = kw_dict
        print("百度电脑端完成！")


class Baidu_APP:
    """Ranking checker for Baidu mobile search (mobile UA on the desktop endpoint)."""

    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
        }
        # NOTE: https://m.baidu.com/from=844b/s produced mojibake; the desktop
        # endpoint with a mobile UA works.
        self.url = "https://www.baidu.com/s"
        self.hm = "华美顾问"
        self.hm_gg = "华美顾问"

    def page(self, kw="酒店顾问"):
        """Return the 1-based rank (str) of the first matching <h3> title in
        the first 3 pages, else "50+". Never raises."""
        pm = '50+'
        try:
            for j in range(3):
                pn = j * 10
                data = {"word": kw, "pn": pn}
                r = requests.get(self.url, params=data, headers=self.headers)
                con = r.text
                with open("{}/百度_手机/百度_{}_{}.html".format(html_dir, kw, j + 1), "w", encoding="utf8") as f:
                    f.write(con)

                soup = BeautifulSoup(con, "lxml")
                print("百度手机端", j + 1, "-", soup.title.string if soup.title else "")
                h3_li = soup.find_all("h3")

                for i, h3 in enumerate(h3_li):
                    h3_text = h3.text.strip()
                    if self.hm in h3_text or self.hm_gg in h3_text:
                        pm = str(pn + i + 1)
                        # BUGFIX: the first hit is the rank; previously later
                        # hits kept overwriting pm (last match won).
                        break
                if pm != '50+':
                    break  # rank found; no need to fetch further pages
        except Exception as e:
            print("百度手机端url失败--", kw)
            print(e)
        return pm

    def run(self, kw_li, re_dict):
        """Look up every keyword and publish {keyword: rank} under the platform key."""
        kw_dict = {}
        for kw in kw_li:
            kw_dict[kw] = self.page(kw)
        re_dict["百度手机端"] = kw_dict
        print("百度手机端完成！")


class Google:
    """Ranking checker for Google search (tunneled through a local proxy)."""

    def __init__(self):
        # Rotate a random UA per request to reduce the chance of an IP ban.
        self.user_agent_list = [
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
            "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
            "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
            "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
        ]
        self.hm_url = "huamei2001"  # company-site token looked for in result URLs
        self.url = "https://www.google.com/search"
        self.proxies = {
            "http": "http://127.0.0.1:10801",
            "https": "https://127.0.0.1:10801",
        }

    def page(self, kw="酒店顾问"):
        """Return the 1-based rank (str) of the first result URL containing
        self.hm_url among up to 50 results, else "50+". Never raises."""
        user_agent = random.choice(self.user_agent_list)
        headers = {
            "Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7",
            # Without a referer Google replied with a broken (utf8-error) page.
            "referer": "https://www.google.com/",
            "User-Agent": user_agent,
        }
        data = {"q": kw, "num": 50, "hl": "zh-CN"}
        pm = '50+'
        try:
            r = requests.get(self.url, params=data, headers=headers, proxies=self.proxies, verify=False)
            with open("{}/谷歌/谷歌_{}_{}.html".format(html_dir, kw, 1), "w", encoding="utf8") as f:
                f.write(r.content.decode("utf8"))
            html = etree.HTML(r.text)
            title = html.xpath("//title/text()")[0]
            print('谷歌', 1, kw, "-", title)
            if kw in title:
                h3url_li = html.xpath("//h3/../@href")
                # h3url_li = html.xpath("//div[@class='KJDcUb']/a/@href")  # mobile variant
                for i, h3url in enumerate(h3url_li):
                    if self.hm_url in h3url:
                        pm = str(i + 1)
                        # BUGFIX: the first occurrence is the rank; previously
                        # later occurrences kept overwriting pm.
                        break
        except Exception as e:
            print("谷歌url失败--", kw)
            print(e)
        finally:
            return pm

    def run(self, kw_li, re_dict):
        """One query serves both the desktop and mobile entries (same ranking)."""
        kw_dict = {}
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict.update({kw: pm})
            # time.sleep(30)  # recommended >30s between requests to avoid anti-bot
        re_dict["谷歌电脑端"] = kw_dict
        re_dict["谷歌手机端"] = kw_dict
        print("谷歌完成！")


class Weibo:
    """Ranking checker for Weibo search via the m.weibo.cn JSON API.

    Desktop and mobile rankings are identical, so one query serves both.
    """

    def __init__(self):
        self.url = "https://m.weibo.cn/api/container/getIndex"
        # https://m.weibo.cn/api/container/getIndex?containerid=100103type%3D1%26q%3D%E9%85%92%E5%BA%97&page_type=searchall
        # Matched against the serialized card dict, hence the quoted forms.
        self.hm = '"华美顾问"'
        self.hm1 = "'华美顾问'"
        self.headers = {
            "Accept": "application/json,text/plain,*/*",
            "MWeibo-Pwa": "1",
            "Referer": "https://m.weibo.cn/search?containerid=100103type%3D1%26q%3D%E9%85%92%E5%BA%97%E9%A1%BE%E9%97%AE",
            "Sec-Fetch-Mode": "cors",
            "User-Agent": "Mozilla/5.0(iPhone;CPUiPhoneOS11_0likeMacOSX)AppleWebKit/604.1.38(KHTML,likeGecko)Version/11.0Mobile/15A372Safari/604.1",
            "X-Requested-With": "XMLHttpRequest",
            "X-XSRF-TOKEN": "caffd8",
        }

    def page(self, kw="酒店顾问"):
        """Return the rank (str) of the first card mentioning the target name
        within 2 pages (10 cards each), else '50+'. Never raises.

        NOTE(review): no handling yet for the anti-crawl case where "data"
        comes back empty.
        """
        pm = '50+'
        try:
            for j in range(1, 3):
                datas = {
                    "containerid": "100103type=1&q={}".format(kw),
                    "page_type": "searchall",
                    "page": j,
                }
                r = requests.get(self.url, params=datas, headers=self.headers)
                print("微博：{}-{}".format(j, kw))
                con = r.json()
                with open("{}/微博_手机/微博_{}_{}.json".format(html_dir, kw, j), "w", encoding="utf8") as f:
                    f.write(json.dumps(con, indent=4, ensure_ascii=False))
                res_li = con["data"]["cards"]
                if res_li:
                    for i, m in enumerate(res_li):
                        n = str(m)
                        if self.hm in n or self.hm1 in n:
                            pm = str((j - 1) * 10 + i + 1)
                            # BUGFIX: the first hit is the rank; previously
                            # later hits kept overwriting pm.
                            break
                if pm != '50+':
                    break  # found; skip the remaining page(s)
        except Exception as e:
            print("微博url失败--", kw)
            print(e)
        finally:
            return pm

    def run(self, kw_li, re_dict):
        """Publish the same {keyword: rank} dict for both platform keys."""
        kw_dict = {}
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict.update({kw: pm})
            # time.sleep(15)  # recommended >30s between requests to avoid anti-bot
        re_dict["微博手机端"] = kw_dict
        re_dict["微博电脑端"] = kw_dict
        print("微博完成！")


class Zhihu:
    """Ranking checker for Zhihu search via the search_v3 JSON API.

    Desktop and mobile rankings are identical, so one query serves both.
    """

    def __init__(self):
        self.url = "https://www.zhihu.com/api/v4/search_v3"
        # https://www.zhihu.com/api/v4/search_v3?t=general&q=%E9%82%AE%E7%AE%B1&correction=1&offset=0&limit=20&lc_idx=0&show_all_topics=0
        # Match by url_token rather than display name: the name may come back
        # wrapped in <em> tags ("<em>华美顾问</em>"), the token never does.
        self.hm = '"hua-mei-gu-wen-24"'
        self.hm1 = "'hua-mei-gu-wen-24'"
        self.headers = {
            "Accept": "application/json,text/plain,*/*",
            # mobile UA
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
        }

    def page(self, kw="酒店顾问"):
        """Return the rank (str) of the first result containing the author's
        url_token within 2 pages of 20 items, else '50+'. Never raises."""
        pm = '50+'
        try:
            for j in range(1, 3):
                datas = {
                    "q": kw,
                    "t": "general",
                    # BUGFIX: offset must be (j-1)*20 so page 1 covers items
                    # 1-20; j*20 skipped the first 20 results and mislabeled
                    # the ranks computed below.
                    "offset": (j - 1) * 20,
                    "limit": 20,  # default 20
                }
                r = requests.get(self.url, params=datas, headers=self.headers)
                print("知乎：{}-{}".format(j, kw))
                con = r.json()
                with open("{}/知乎_手机/知乎_{}_{}.json".format(html_dir, kw, j), "w", encoding="utf8") as f:
                    f.write(json.dumps(con, indent=4, ensure_ascii=False))
                res_li = con["data"]
                if res_li:
                    for i, m in enumerate(res_li):
                        n = str(m)
                        if self.hm in n or self.hm1 in n:
                            pm = str((j - 1) * 20 + i + 1)
                            # BUGFIX: the first hit is the rank; previously
                            # later hits kept overwriting pm.
                            break
                if pm != '50+':
                    break  # found; skip the remaining page(s)
        except Exception as e:
            print("知乎url失败--", kw)
            print(e)
        finally:
            return str(pm)

    def run(self, kw_li, re_dict):
        """Publish the same {keyword: rank} dict for both platform keys."""
        kw_dict = {}
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict.update({kw: pm})
            # time.sleep(10)
        re_dict["知乎手机端"] = kw_dict
        re_dict["知乎电脑端"] = kw_dict
        print("知乎完成！")


class Toutiao_PC:
    """Ranking checker for Toutiao desktop search via its JSON content API."""

    def __init__(self):
        self.hm = '华美顾问'
        self.url = 'https://www.toutiao.com/api/search/content/'
        self.headers = {
            'accept': 'application/json, text/javascript',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,en-US;q=0.6',
            'cache-control': 'no-cache',
            'content-type': 'application/x-www-form-urlencoded',
            # With a cookie the API returns 20 items matching the browser;
            # without one it returns only 10 and they differ from the browser.
            'cookie': 'tt_webid=6730868230266684932; WEATHER_CITY=%E5%8C%97%E4%BA%AC; tt_webid=6730868230266684932; csrftoken=0ee1c29623fbaf16be4f2d846aab784a; _ga=GA1.2.1829696456.1567411449; WIN_WH=1536_832; s_v_web_id=7d68b0820ff2e83ce5bf1877bc8ee6e3; RT="z=1&dm=toutiao.com&si=iyu7jgr4hrq&ss=k10e84u9&sl=1&tt=3qy&ld=58kmw&r=32aeb9eb56facd8304cfc1b363566bd2&ul=58kn1&hd=58knm"; __tasessionId=y749tador1569503233797',
            'pragma': 'no-cache',
            'referer': 'https://www.toutiao.com/search/',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
            'x-requested-with': 'XMLHttpRequest',
            }

    def page(self, kw="酒店顾问"):
        """Return the rank (str) of the first real result authored by the
        target account within the first 3 API pages, else '50+'.

        Only entries carrying abstract, title and source count as real
        results; other card types (ads, widgets) are skipped. Never raises.
        """
        pm = '50+'
        num = 0  # running count of real result entries seen so far
        try:
            # 3 requests x 20 items is enough in practice (raise to 5 pages
            # if needed; ~100 raw items correspond to ~60 browser results).
            for page_idx in range(3):
                params = {
                    'aid': '24',
                    'app_name': 'web_search',
                    'offset': page_idx * 20,
                    'format': 'json',
                    'keyword': kw,
                    'autoload': 'true',
                    'count': 20,
                    'en_qc': '1',
                    'cur_tab': 1,
                    'from': 'search_tab',
                    'pd': 'synthesis',
                }
                resp = requests.get(self.url, params=params, headers=self.headers)
                print("今日头条电脑端：{}-{}".format(page_idx + 1, kw))
                payload = resp.json()
                with open('{}/今日头条/今日头条_{}_{}.json'.format(html_dir, kw, page_idx + 1), 'w', encoding='utf8') as f:
                    f.write(json.dumps(payload, ensure_ascii=False, indent=4))
                entries = payload['data']
                if entries:
                    for entry in entries:
                        summary = entry.get('abstract')
                        headline = entry.get('title')
                        author = entry.get('source')
                        if summary and headline and author:
                            num += 1
                            if author == self.hm:
                                pm = num
                                break
                if pm != '50+':
                    break  # rank found; skip remaining pages
        except Exception as e:
            print("今日头条电脑端url失败--", kw)
            print(e)
        finally:
            return str(pm)

    def run(self, kw_li, re_dict):
        """Collect {keyword: rank} for every keyword and publish the result."""
        kw_dict = {kw: self.page(kw) for kw in kw_li}
        re_dict["今日头条电脑端"] = kw_dict
        print("今日头条电脑端完成！")


class Toutiao_APP:
    """Ranking checker for Toutiao mobile search (HTML results page)."""

    def __init__(self):
        self.hm = '华美顾问'  # the company's official Toutiao account
        # The official site contains "huamei2001." (covers huamei2001.com,
        # huamei2001.net, ...).
        self.hm_url = 'huamei2001.'
        self.url = 'https://m.toutiao.com/search/'
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
            'Host': 'm.toutiao.com',
            'Referer': 'https://m.toutiao.com/search/',
        }

    def page(self, kw="酒店顾问"):
        """Return the rank (str) of the first result whose source is the
        official site or account within 3 pages, else '50+'. Never raises."""
        pm = '50+'
        try:
            # 3 pages x 10 results is enough in practice (use 5 for ~50 items).
            for j in range(3):
                pn = j * 10
                params = {"keyword": kw, 'count': 10, "offset": pn, 'start_index': pn}
                r = requests.get(self.url, params=params, headers=self.headers)
                con = r.text
                with open("{}/今日头条_手机/今日头条手机_{}_{}.html".format(html_dir, kw, j + 1), "w", encoding="utf8") as f:
                    f.write(con)

                html = etree.HTML(con)
                wb_title = html.xpath('//title/text()')[0]
                print("今日头条手机端", j + 1, "-", wb_title)
                res_li = html.xpath('//div[@class="result-content"]')

                for i, res in enumerate(res_li):
                    res_url = res.xpath('.//div[contains(@class,"ts-size14")]/span[1]/text()')
                    # res_url comes in 3 shapes: [] , ['www.fedint.com'] , or
                    # ['[最佳回答]', '悟空问答'] — the source is always the last
                    # element, hence index -1.
                    if res_url:
                        # Match either the official site or the official account.
                        if self.hm_url in res_url[-1] or self.hm in res_url[-1]:
                            pm = str(pn + i + 1)
                            # BUGFIX: the first hit is the rank; previously
                            # later hits kept overwriting pm.
                            break
                if pm != '50+':
                    break  # rank found; skip the remaining pages
        except Exception as e:
            print("今日头条手机端url失败--", kw)
            print(e)
        finally:
            return pm

    def run(self, kw_li, re_dict):
        """Look up every keyword and publish {keyword: rank} under the platform key."""
        kw_dict = {}
        for kw in kw_li:
            kw_dict[kw] = self.page(kw)
        re_dict["今日头条手机端"] = kw_dict
        print("今日头条手机端完成！")


# class Linkedin():
#     def __init__(self):
#         self.url='https://www.linkedin.com/mwlite/search/results/all'
#         self.hm='华美顾问'
#         self.headers={
#             'cookie': 'bcookie="v=2&93fdaca3-b373-4e7a-8681-3d6f71c35d28"; bscookie="v=1&2019092811214926184922-eec0-4d4c-8364-622029971d75AQEFrEhauWeiBAoagoi-RRw3wjAg2MNl"; _ga=GA1.2.377214128.1569669708; _gat=1; AMCVS_14215E3D5995C57C0A495C55%40AdobeOrg=1; AMCV_14215E3D5995C57C0A495C55%40AdobeOrg=-1303530583%7CMCIDTS%7C18168%7CMCMID%7C53491006891487876871671437302217651683%7CMCAAMLH-1570274508%7C11%7CMCAAMB-1570274508%7C6G1ynYcLPuiQxYZrsz_pkqfLG9yMXBpb2zX5dvJdYQJzPXImdj0y%7CMCOPTOUT-1569676908s%7CNONE%7CvVersion%7C3.3.0; aam_uuid=54012205828187279801728551448569431592; lil-lang=zh_CN; utag_main=v_id:016d779d389500161be06a0184c200087005d07f004bb$_sn:1$_se:1$_ss:1$_st:1569671655384$ses_id:1569669855384%3Bexp-session$_pn:1%3Bexp-session$vapi_domain:linkedin.com; pushPermState=default; appUpsellCoolOff=1569669921942; visit=v=1&M; JSESSIONID="ajax:6098893895897455442"; lissc1=1; lissc2=1; RT=s=1569669941239&r=https%3A%2F%2Fwww.linkedin.com%2Fstart%2Fjoin%3Ftrk%3Dguest_homepage-basic_nav-header-join; li_at=AQEDAS0342kDDObDAAABbXee5d4AAAFtm6tp3lEARHaJdL8xsjPZ9K-sIBb8PxqIDuJ7OyXdlCmASre2UlKcdBRgLn_EBASs-eAHKqvLW_MVnWg0l6z3XYIVWB0BjKXrmCCjG3Te89rQdrApJB3E3LvH; liap=true; sl=v=1&XobLR; li_cc=AQF9ajyFvnWGnAAAAW13nudpUr8PKwkufx8JiE2uuAa_U8u9i6QvGgSbS-m1SeNC7tY3wk7L8W_X; lang=v=2&lang=zh-cn; lidc="b=OGST09:g=1293:u=1:i={0:.0f}:t={0:.0f}:s=AQE3IpLZ9xmu0Uk8PEtrsZZKzfQlOQce"'.format(time.time()),
#             'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
#             'accept-encoding': 'gzip, deflate, br',
#             'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,en-US;q=0.6',
#             'csrf-token': 'ajax:6098893895897455442',
#             'referer': 'https://www.linkedin.com/mwlite/search/results/all',
#             # 'referer': 'https://www.linkedin.com/mwlite/search/results/all?keywords=%E9%85%92%E5%BA%97%E9%A1%BE%E9%97%AE',
#             'user-agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1',
#         }
    
#     def page(self,kw='酒店顾问'):
#         pm='50+'
#         for i in range(2): #首页从0开始
#             params={
#                 'keywords':kw,
#                 'pageNumber':i
#             }
#             print(i,'开始')
#             r = requests.get(self.url, params=params, headers=self.headers)
#             con=r.content.decode('utf8')
#             print(r.status_code,r.url,con)
#             with open("{}/领英/领英_{}_{}.html".format(html_dir, kw, i + 1),"w",encoding="utf8") as f:
#                 f.write(con)

#             html=etree.HTML(con)
#             wb_title=html.xpath('//title/text()')[0]
#             print("领英手机端", i + 1, "-", wb_title)
#         return pm

#     def run(self, kw_li, re_dict):
#         kw_dict = {}
#         for kw in kw_li:
#             pm = self.page(kw)
#             kw_dict.update({kw: pm})
#         # print(kw_dict)
#         re_dict["领英手机端"] = kw_dict
#         print("领英手机端完成！")


class Linkedin_PC():
    """Ranking checker for LinkedIn people search via the voyager blended API.

    One query serves both the desktop and mobile entries.
    """

    def __init__(self):
        self.url='https://www.linkedin.com/voyager/api/search/blended'
        # https://www.linkedin.com/voyager/api/search/blended
        # Company names looked for inside each person's headline text.
        self.hm='华美顾问'
        self.hm1='华美酒店'
        self.headers={
            'cookie': 'bcookie="v=2&93fdaca3-b373-4e7a-8681-3d6f71c35d28"; bscookie="v=1&2019092811214926184922-eec0-4d4c-8364-622029971d75AQEFrEhauWeiBAoagoi-RRw3wjAg2MNl"; _ga=GA1.2.377214128.1569669708; _gat=1; AMCVS_14215E3D5995C57C0A495C55%40AdobeOrg=1; AMCV_14215E3D5995C57C0A495C55%40AdobeOrg=-1303530583%7CMCIDTS%7C18168%7CMCMID%7C53491006891487876871671437302217651683%7CMCAAMLH-1570274508%7C11%7CMCAAMB-1570274508%7C6G1ynYcLPuiQxYZrsz_pkqfLG9yMXBpb2zX5dvJdYQJzPXImdj0y%7CMCOPTOUT-1569676908s%7CNONE%7CvVersion%7C3.3.0; aam_uuid=54012205828187279801728551448569431592; lil-lang=zh_CN; utag_main=v_id:016d779d389500161be06a0184c200087005d07f004bb$_sn:1$_se:1$_ss:1$_st:1569671655384$ses_id:1569669855384%3Bexp-session$_pn:1%3Bexp-session$vapi_domain:linkedin.com; pushPermState=default; appUpsellCoolOff=1569669921942; visit=v=1&M; JSESSIONID="ajax:6098893895897455442"; lissc1=1; lissc2=1; RT=s=1569669941239&r=https%3A%2F%2Fwww.linkedin.com%2Fstart%2Fjoin%3Ftrk%3Dguest_homepage-basic_nav-header-join; li_at=AQEDAS0342kDDObDAAABbXee5d4AAAFtm6tp3lEARHaJdL8xsjPZ9K-sIBb8PxqIDuJ7OyXdlCmASre2UlKcdBRgLn_EBASs-eAHKqvLW_MVnWg0l6z3XYIVWB0BjKXrmCCjG3Te89rQdrApJB3E3LvH; liap=true; sl=v=1&XobLR; li_cc=AQF9ajyFvnWGnAAAAW13nudpUr8PKwkufx8JiE2uuAa_U8u9i6QvGgSbS-m1SeNC7tY3wk7L8W_X; lang=v=2&lang=zh-cn; lidc="b=OGST09:g=1293:u=1:i={0:.0f}:t={0:.0f}:s=AQE3IpLZ9xmu0Uk8PEtrsZZKzfQlOQce"'.format(time.time()), # a fresh time() timestamp is baked into the lidc cookie to reduce anti-crawl blocking
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,en-US;q=0.6',
            'csrf-token': 'ajax:6098893895897455442',
            'referer': 'https://www.linkedin.com/mwlite/search/results/people',
            # 'referer': 'https://www.linkedin.com/mwlite/search/results/all?keywords=%E9%85%92%E5%BA%97%E9%A1%BE%E9%97%AE',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        }
    
    def page(self,kw='酒店顾问'):
        """Return the rank (str) of the first person whose headline contains a
        target company name, else '50+'. Never raises."""
        pm='50+'
        try:
            for i in range(1): # first page starts at 0; the second page would start at 10
                params={
                    'count':40, # 40 is the API maximum: one request returns 40 items, so range(1) is enough; 50 makes the API return no data at all
                    'q':'all',
                    'keywords':kw,
                    'start':i*10,
                    'filters':'List(resultType->PEOPLE)',
                    # 'filters':'List()',
                    'origin': 'HISTORY', # required parameter
                    # 'queryContext': 'List(spellCorrectionEnabled->true,relatedSearchesEnabled->true,kcardTypes->PROFILE|COMPANY|JOB_TITLE)',
                }
                r = requests.get(self.url, params=params, headers=self.headers,timeout=15)
                con=r.json()
                # print(r.status_code,r.url)
                print("领英", i + 1, "---", kw)            
                with open("{}/领英/领英_{}_{}.json".format(html_dir, kw, i + 1),"w",encoding="utf8") as f:
                    f.write(json.dumps(con, indent=4, ensure_ascii=False))
                
                # The response nests result groups; walk them in order,
                # counting only the "real" groups to compute the global rank.
                ele_li=con['elements']
                # print('ele_li',len(ele_li))
                num=0
                for ie,e in enumerate(ele_li):
                    # print('e',e)
                    t_li=e['elements']
                    # print('t_li',len(e['elements']))
                    if pm!='50+':
                        break
                    if ie %2 ==0:
                    # even groups (0,2,4) hold real results; odd groups (1,3) hold other data
                        for it,t in enumerate(t_li):
                            company=t['headline']['text']
                            # print(company)
                            if self.hm in company or self.hm1 in company:
                                pm=str(i*10+it+1+num)
                                break
                        else:
                            # A page spans several groups: when the target was
                            # not in this group, add its length so the next
                            # group's indexes continue the global count.
                            num+=len(t_li)
        except Exception as e:
            print("领英url失败--", kw)
            print(e)
        finally:
            return str(pm)

    def run(self, kw_li, re_dict):
        """Publish the same {keyword: rank} dict for both platform keys."""
        kw_dict = {}
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict.update({kw: pm})
        # print(kw_dict)
        re_dict["领英电脑端"] = kw_dict
        re_dict["领英手机端"] = kw_dict
        print("领英电脑端完成！")
        print("领英手机端完成！")


class Twitter():
    """Ranking checker for Twitter search (tunneled through a local proxy)."""

    def __init__(self):
        self.headers = {
            'accept': 'application/json, text/javascript, */*; q=0.01',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,en-US;q=0.6',
            'Referer': 'https://twitter.com/search',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        }
        self.hm = "huamei2001"  # the company's Twitter username
        self.url = "https://twitter.com/search"
        self.proxies = {
            "http": "http://127.0.0.1:10801",
            "https": "https://127.0.0.1:10801",
        }

    def page(self, kw="酒店顾问"):
        """Return the rank (str) of the first tweet whose username contains
        self.hm, else '50+'. Never raises."""
        pm = '50+'
        data = {"q": kw, 'src': 'typd'}
        try:
            r = requests.get(self.url, params=data, headers=self.headers, proxies=self.proxies, verify=False)
            con = r.content.decode("utf8")
            with open("{}/twitter/twitter_{}_{}.html".format(html_dir, kw, 1), "w", encoding="utf8") as f:
                f.write(con)
            html = etree.HTML(r.text)
            title = html.xpath("//title/text()")[0]
            print('twitter', kw, 1, "-", title)
            user_li = html.xpath('//span[contains(@class,"username")]/b/text()')
            for j, user in enumerate(user_li):
                if self.hm in user:
                    pm = str(j + 1)
                    # BUGFIX: the first occurrence is the rank; previously
                    # later occurrences kept overwriting pm.
                    break
        except Exception as e:
            print("twitter请求url失败--", kw)
            print(e)
        finally:
            return pm

    def run(self, kw_li, re_dict):
        """One query serves both the desktop and mobile entries."""
        kw_dict = {}
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict.update({kw: pm})
            # time.sleep(30)  # recommended >30s between requests to avoid anti-bot
        re_dict["twitter电脑端"] = kw_dict
        re_dict["twitter手机端"] = kw_dict
        print("twitter完成！")



def get_file_list(file_path):
    """Return the entries of *file_path* sorted by creation time, oldest first.

    Returns an empty list for an empty directory (BUGFIX: the old version
    returned None, which crashed callers that iterate the result).

    Note: os.path.getctime is creation time on Windows (this script targets
    Windows paths); use os.path.getmtime to sort by last modification instead.
    """
    dir_list = os.listdir(file_path)
    if not dir_list:
        return []
    dir_list.sort(key=lambda name: os.path.getctime(os.path.join(file_path, name)))
    return dir_list

def send_mail(file, file_all, t_all):
    """Email the aggregated result workbook as an attachment via QQ SMTP.

    Args:
        file: display name the mail client shows for the attachment.
        file_all: path of the xlsx file to attach.
        t_all: total run time in seconds, shown in the mail body.

    Never raises: SMTP failures are caught and reported on stdout.
    """
    # Third-party SMTP service credentials.
    mail_host = "smtp.qq.com"
    mail_user = "1216887433@qq.com"
    mail_pass = "rqfruulyrbenhhab"  # SMTP authorization code, not the account password

    sender = '1216887433@qq.com'
    receivers = ['1216887433@qq.com']  # may list any mailbox(es)
    # receivers = ['1216887433@qq.com','cuilinlin@huamei2001.com']

    # Multipart message: plain-text body + one binary attachment.
    message = MIMEMultipart()
    message['From'] = Header("rpa机器人", 'utf-8')
    message['To'] = Header("华美", 'utf-8')
    subject = '搜索监控'
    message['Subject'] = Header(subject, 'utf-8')

    # Mail body.
    message.attach(MIMEText('搜索监控结果表格，总计耗时{:.0f}秒'.format(t_all), 'plain', 'utf-8'))

    # Attachment. BUGFIX: use a context manager so the file handle is closed
    # (the old open(...).read() leaked it).
    with open(file_all, 'rb') as fp:
        att1 = MIMEText(fp.read(), 'base64', 'utf-8')
    att1["Content-Type"] = 'application/octet-stream'
    # The filename set here is what the recipient sees.
    att1["Content-Disposition"] = 'attachment; filename="{}"'.format(file)
    message.attach(att1)

    try:
        smtpObj = smtplib.SMTP_SSL(mail_host, 465)
        smtpObj.login(mail_user, mail_pass)
        smtpObj.sendmail(sender, receivers, message.as_string())
        print("邮件发送成功")
    except Exception as e:
        print("Error: 无法发送邮件")
        print(e)


def day_dir():
    """Ensure today's archive directory and every platform sub-directory exist.

    Uses os.makedirs(exist_ok=True) so a partially created tree (e.g. after a
    crash between the old mkdir calls) is completed on the next run instead of
    being skipped because html_dir already exists.
    """
    os.makedirs(html_dir, exist_ok=True)
    subdirs = (
        "百度", "百度_手机", "谷歌", "谷歌_手机", "微博_手机", "知乎_手机",
        "今日头条", "今日头条_手机", "领英", "微信", "facebook", "twitter",
    )
    for sub in subdirs:
        os.makedirs(os.path.join(html_dir, sub), exist_ok=True)


def read_kw():
    """Read the keyword list from the tracking spreadsheet.

    Returns:
        (kw_li, df): the keywords to query and the full DataFrame, kept for
        merging the results back in later. Non-string cells and category
        header rows (names ending in "类") are skipped.
    """
    day_dir()  # make sure today's archive directories exist first
    path = r"c:\搜索排名\搜索排名2.xlsx"
    df = pd.read_excel(path, header=0)
    kw_li_all = df.loc[:, "关键词"].tolist()
    # isinstance instead of type()==str; drop category rows like "xx类".
    kw_li = [kw for kw in kw_li_all if isinstance(kw, str) and not kw.endswith("类")]
    return kw_li, df


def main():
    """One full monitoring pass: crawl every platform in parallel, merge the
    ranks into today's spreadsheet, rebuild the multi-day summary workbook,
    store the run to MongoDB and email the summary."""
    t_start=time.time()
    # Shared dict proxy: each platform worker writes its
    # {platform: {keyword: rank}} entry into it.
    manager = multiprocessing.Manager()
    re_dict = manager.dict()
    kw_li, df_dt = read_kw()
    # kw_li=['精品酒店策划']

    baidu = Baidu()
    baidu_app = Baidu_APP()
    google = Google()
    weibo = Weibo()
    zhihu=Zhihu()
    toutiao=Toutiao_PC()
    toutiao_app=Toutiao_APP()
    linkedin=Linkedin_PC()
    twitter=Twitter()
    
    # One process per platform crawler; all share kw_li and re_dict.
    p1=multiprocessing.Process(target=baidu.run,args=(kw_li,re_dict))
    p2=multiprocessing.Process(target=baidu_app.run,args=(kw_li,re_dict))
    p3 = multiprocessing.Process(target=google.run, args=(kw_li, re_dict))
    p5=multiprocessing.Process(target=weibo.run,args=(kw_li,re_dict))
    p6=multiprocessing.Process(target=zhihu.run,args=(kw_li,re_dict))
    p7=multiprocessing.Process(target=toutiao.run,args=(kw_li,re_dict))
    p8=multiprocessing.Process(target=toutiao_app.run,args=(kw_li,re_dict))
    p9=multiprocessing.Process(target=linkedin.run,args=(kw_li,re_dict))
    p10=multiprocessing.Process(target=twitter.run,args=(kw_li,re_dict))

    # The proxy-based crawlers (google p3, twitter p10) are started first —
    # presumably because they are the slowest; verify if order matters.
    p3.start()
    p10.start()
    p1.start()
    p2.start()
    p5.start()
    p6.start()
    p7.start()
    p8.start()
    p9.start()



    # Wait for every worker before reading re_dict.
    p1.join()
    p2.join()
    p5.join()
    p6.join()
    p7.join()
    p8.join()
    p9.join()
    p10.join()
    p3.join()

    # re_dict['time']=t
    print(re_dict,type(re_dict))
    # Copy the Manager proxy into a plain dict before persisting.
    j_dict=dict(re_dict)
    ymd_hm = time.strftime("%Y%m%d-%H%M", time.localtime())
    j_dict['time']=ymd_hm
    try:
        save_mongodb(j_dict)
    except Exception as e:
        print(e)

    # Write each platform's ranks back into the matching keyword rows.
    for key,value in re_dict.items():
        # print(key,value)
        for j in value:
            df_dt.loc[df_dt["关键词"] == j, key] = value[j]
    print(df_dt)

    # Save today's single-run result sheet.
    hour_m=time.strftime("%H-%M", time.localtime())
    df_dt.to_excel("c:\搜索排名\单天结果\搜索排名结果{}.xlsx".format(ymd), sheet_name=hour_m,index=False)

    # Rebuild the multi-day summary: one (platform, day) column pair each.
    file_list = get_file_list(r'c:\搜索排名\单天结果')
    # File names look like "搜索排名结果20190928.xlsx": chars [10:14] are MMDD.
    day_li = [file[10:14] for file in file_list]
    df0 = pd.read_excel(r'c:\搜索排名\搜索排名.xlsx', header=0, index_col='关键词')
    pt_li = df0.columns.tolist()
    gjc_li = []
    for gjc in df0.index.tolist():
        if gjc and type(gjc)==str:
            gjc_li.append(gjc)
    df=pd.DataFrame(df0,columns = pd.MultiIndex.from_product([pt_li,day_li]))
    # print(df)

    # Fill the summary frame from each per-day result file.
    for file in file_list:
        day=file[10:14]
        df1 = pd.read_excel(r'c:\搜索排名\单天结果\\'+file,header=0,index_col=0)
        for pt in pt_li:
            try:
                # Skip platforms with no data for that day.
                if df1[pt].isnull().all():
                    continue
                df.loc[:,(pt, day)]=df1.loc[:,[pt]].values.tolist()
            except Exception as e:
                print(e)

    file='result{}.xlsx'.format(ymd)
    file_all='c:\搜索排名\汇总结果\{}'.format(file)
    print(df)
    df.to_excel(file_all,sheet_name=hour_m)

    t_end=time.time()
    t_all=t_end-t_start
    print('总计耗时{:.0f}秒'.format(t_all))
#     with open('耗时.txt','a',encoding='utf8')as f:
#         f.write('{}，总计耗时{:.0f}秒。\n'.format(t,t_all))


    # file_all='c:\搜索排名\汇总结果\搜索排名汇总结果20190923-1321.xlsx'
    send_mail(file,file_all,t_all)


# Script entry point: run one full monitoring pass.
if __name__ == "__main__":
    main()
