
class Baidu:
    """Rank checker for Baidu desktop (PC) search results."""

    def __init__(self):
        # Desktop Chrome UA so Baidu serves the PC result page.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36",
        }
        self.url = "https://www.baidu.com/s"
        # Target brand strings to look for in result headings.
        self.hm = "华美顾问"
        self.hm_gg = "华美顾问"

    def page(self, kw="酒店顾问"):
        """Return the 1-based rank (as a string) of the first result whose
        <h3> text contains the target brand, scanning the first 3 result
        pages; return "50+" when no page contains it."""
        for page_idx in range(3):
            offset = page_idx * 10
            params = {"wd": kw, "pn": offset}
            resp = requests.get(self.url, params=params, headers=self.headers)
            body = resp.text
            # Keep a snapshot of the raw HTML for later inspection.
            snapshot = "{}/百度/百度_{}_{}.html".format(html_dir, kw, page_idx + 1)
            with open(snapshot, "w", encoding="utf8") as fh:
                fh.write(body)

            soup = BeautifulSoup(body, "lxml")
            print("百度电脑端", page_idx + 1, "-", soup.title.string)

            for idx, heading in enumerate(soup.find_all("h3")):
                text = heading.text.strip()
                if self.hm in text or self.hm_gg in text:
                    return str(offset + idx + 1)
        # All scanned pages exhausted without a hit.
        return "50+"

    def run(self, kw_li, re_dict):
        """Resolve the rank for every keyword and store the mapping under
        the "百度电脑端" key of re_dict."""
        re_dict["百度电脑端"] = {kw: self.page(kw) for kw in kw_li}
        print("百度电脑端完成！")


class Baidu_APP():
    """Rank checker for Baidu mobile search results; records go to MongoDB."""

    def __init__(self):
        # iPhone Safari UA so Baidu serves the mobile result markup.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 11_0 like Mac OS X) AppleWebKit/604.1.38 (KHTML, like Gecko) Version/11.0 Mobile/15A372 Safari/604.1",
        }
        # NOTE: the m.baidu.com endpoint produced garbled responses, so the
        # www endpoint is used together with the mobile User-Agent.
        self.url = "https://www.baidu.com/s"
        # Target brand strings to look for in result headings.
        self.hm = "华美顾问"
        self.hm_gg = "华美顾问"

    def page(self, kw):
        """Return the 1-based rank (as a string) of the first mobile result
        whose <h3> text contains the target brand, scanning the first
        3 result pages; return "50+" when not found or on request failure.
        """
        pm = '50+'
        try:
            for j in range(3):
                pn = j * 10
                data = {"word": kw, "pn": pn}
                r = requests.get(self.url, params=data, headers=self.headers)
                html = etree.HTML(r.text)
                wb_title = html.xpath('//title/text()')[0]
                print("百度手机端", j + 1, "-", wb_title)

                results = html.xpath('//div[@class="c-result result"]')
                for i, node in enumerate(results):
                    # BUGFIX: the original removed items from the list it was
                    # iterating over, which can silently skip elements; build
                    # the filtered list with a comprehension instead.
                    parts = [t for t in node.xpath('.//h3//text()') if '\n' not in t]
                    h3_text = ''.join(parts)
                    if self.hm in h3_text or self.hm_gg in h3_text:
                        # BUGFIX: return the FIRST occurrence. The original
                        # kept scanning, overwriting pm with later matches and
                        # issuing unnecessary extra requests; the desktop
                        # Baidu class already returns the first hit.
                        return str(pn + i + 1)
        except Exception as e:
            print("百度手机端url失败--", kw)
            print(e)
        # Reached on "not found" or after a handled request error.
        # (Moved out of a `finally: return`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        return pm

    def save_mongodb(self, kw_dict):
        """Persist one rank record into MongoDB."""
        mongoSession.insert_one(db, collection, kw_dict)

    def run(self, kw_li):
        """Look up and store the rank for every keyword in kw_li."""
        for kw in kw_li:
            pm = self.page(kw)
            kw_dict = {
                "平台": '百度手机端',
                "公司": company,
                'time': int(time.time()),
                '关键词': kw,
                '排名': pm,
            }
            self.save_mongodb(kw_dict)
        print("百度手机端完成！")



class Linkedin_PC():
    """Rank checker against LinkedIn's internal people-search API
    (voyager/search/blended). The same rank is recorded for both the
    "电脑端" (PC) and "手机端" (mobile) platform labels."""

    def __init__(self):
        self.url = 'https://www.linkedin.com/voyager/api/search/blended'
        # https://www.linkedin.com/voyager/api/search/blended
        # Target brand strings to look for in profile headlines.
        self.hm = '华美顾问'
        self.hm1 = '华美酒店'
        self.headers = {
            # A time.time() timestamp is substituted into the lidc cookie so
            # the cookie looks fresh — an anti-anti-scraping measure.
            'cookie': 'bcookie="v=2&93fdaca3-b373-4e7a-8681-3d6f71c35d28"; bscookie="v=1&2019092811214926184922-eec0-4d4c-8364-622029971d75AQEFrEhauWeiBAoagoi-RRw3wjAg2MNl"; _ga=GA1.2.377214128.1569669708; _gat=1; AMCVS_14215E3D5995C57C0A495C55%40AdobeOrg=1; AMCV_14215E3D5995C57C0A495C55%40AdobeOrg=-1303530583%7CMCIDTS%7C18168%7CMCMID%7C53491006891487876871671437302217651683%7CMCAAMLH-1570274508%7C11%7CMCAAMB-1570274508%7C6G1ynYcLPuiQxYZrsz_pkqfLG9yMXBpb2zX5dvJdYQJzPXImdj0y%7CMCOPTOUT-1569676908s%7CNONE%7CvVersion%7C3.3.0; aam_uuid=54012205828187279801728551448569431592; lil-lang=zh_CN; utag_main=v_id:016d779d389500161be06a0184c200087005d07f004bb$_sn:1$_se:1$_ss:1$_st:1569671655384$ses_id:1569669855384%3Bexp-session$_pn:1%3Bexp-session$vapi_domain:linkedin.com; pushPermState=default; appUpsellCoolOff=1569669921942; visit=v=1&M; JSESSIONID="ajax:6098893895897455442"; lissc1=1; lissc2=1; RT=s=1569669941239&r=https%3A%2F%2Fwww.linkedin.com%2Fstart%2Fjoin%3Ftrk%3Dguest_homepage-basic_nav-header-join; li_at=AQEDAS0342kDDObDAAABbXee5d4AAAFtm6tp3lEARHaJdL8xsjPZ9K-sIBb8PxqIDuJ7OyXdlCmASre2UlKcdBRgLn_EBASs-eAHKqvLW_MVnWg0l6z3XYIVWB0BjKXrmCCjG3Te89rQdrApJB3E3LvH; liap=true; sl=v=1&XobLR; li_cc=AQF9ajyFvnWGnAAAAW13nudpUr8PKwkufx8JiE2uuAa_U8u9i6QvGgSbS-m1SeNC7tY3wk7L8W_X; lang=v=2&lang=zh-cn; lidc="b=OGST09:g=1293:u=1:i={0:.0f}:t={0:.0f}:s=AQE3IpLZ9xmu0Uk8PEtrsZZKzfQlOQce"'.format(time.time()),
            'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,ja;q=0.7,en-US;q=0.6',
            'csrf-token': 'ajax:6098893895897455442',
            'referer': 'https://www.linkedin.com/mwlite/search/results/people',
            # 'referer': 'https://www.linkedin.com/mwlite/search/results/all?keywords=%E9%85%92%E5%BA%97%E9%A1%BE%E9%97%AE',
            'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36',
        }

    def page(self, kw):
        """Return the 1-based rank (as a string) of the first people-search
        hit whose headline contains a target brand string, or "50+" when
        not found or the request fails."""
        pm = '50+'
        n=0
        try:
            for i in range(1):  # first page starts at 0, second page at 10
                params = {
                    'count': 40,  # count maxes out at 40, which returns 40 rows in a single request, so range(1) suffices; asking for 50 returns 0 rows (no data at all).
                    'q': 'all',
                    'keywords': kw,
                    'start': i*10,
                    'filters': 'List(resultType->PEOPLE)',
                    # 'filters':'List()',
                    'origin': 'HISTORY',  # required parameter
                    # 'queryContext': 'List(spellCorrectionEnabled->true,relatedSearchesEnabled->true,kcardTypes->PROFILE|COMPANY|JOB_TITLE)',
                }
                r = requests.get(self.url, params=params,
                                 headers=self.headers, timeout=15)
                con = r.json()
                # print(r.status_code,r.url)
                print("领英:", kw,i + 1)
                # Dump the raw JSON response for debugging/auditing.
                with open("领英_{}_{}.json".format(kw, i + 1),"w",encoding="utf8") as f:
                    f.write(json.dumps(con, indent=4, ensure_ascii=False))

                # First scan: walk SEARCH_HITS -> PROFILE entries, counting
                # profiles in n; rank = position of the first headline match.
                # NOTE(review): the inner `break` only exits the el-loop, so
                # later SEARCH_HITS groups may still overwrite pm — confirm
                # whether first-match semantics are intended.
                ele_li = con['elements']
                for ele in ele_li:
                    if ele['type']=='SEARCH_HITS':
                        for el in ele['elements']:
                            if el['type']=='PROFILE':
                                n+=1
                                user_job=el['headline']['text']
                                print(n,user_job)
                                if self.hm in user_job or self.hm1 in user_job:
                                    pm=str(n)
                                    break

                # print('ele_li',len(ele_li))
                # Second scan (only effective while pm is still '50+'):
                # positional pass over the raw element groups.
                # NOTE(review): this appears to duplicate the scan above with
                # a different indexing scheme; which one is authoritative is
                # unclear — verify against a captured response before touching.
                num = 0
                for ie, e in enumerate(ele_li):
                    # print('e',e)
                    t_li = e['elements']
                    # print('t_li',len(e['elements']))
                    if pm != '50+':
                        break
                    if ie % 2 == 0:
                        # groups [0,2,4] hold real result data; [1,3] hold other data
                        for it, t in enumerate(t_li):
                            # NOTE(review): local `company` shadows the
                            # module-level `company` used by run().
                            company = t['headline']['text']
                            # print(company)
                            if self.hm in company or self.hm1 in company:
                                pm = str(i*10+it+1+num)
                                break
                        else:
                            # One page iterates the JSON several times; when the
                            # target company is not in this group, add the group's
                            # length so later indices stay absolute. Must follow
                            # the inner for-loop (for/else).
                            num += len(t_li)
        except Exception as e:
            print("领英url失败--", kw)
            print(e)
        finally:
            # NOTE(review): `return` inside finally also suppresses
            # non-Exception interrupts (e.g. KeyboardInterrupt).
            return str(pm)

    def save_mongodb(self, kw_dict, kw_dict_app):
        """Persist the PC and mobile rank records into MongoDB."""
        mongoSession.insert_one(db, collection, kw_dict)
        mongoSession.insert_one(db, collection, kw_dict_app)

    def run(self, kw_li):
        """Look up the rank for every keyword; currently only prints the
        record (the MongoDB write below is commented out)."""
        for kw in kw_li:
            pm = self.page(kw)
            t = time.time()
            kw_dict = {}
            kw_dict["平台"] = '领英电脑端'
            kw_dict["公司"] = company
            kw_dict['time'] = int(t)
            kw_dict['关键词'] = kw
            kw_dict['排名'] = pm
            # Same record under the mobile platform label.
            kw_dict_app = copy.deepcopy(kw_dict)
            kw_dict_app["平台"] = '领英手机端'
            print(kw_dict)
            # self.save_mongodb(kw_dict, kw_dict_app)
            # time.sleep(20)  # keep request interval > 20s to avoid anti-scraping
        print("领英电脑端完成！")
        print("领英手机端完成！")



def get_kw():

