# -*- coding: utf-8 -*-
"""
@Time ： 2021/1/25 10:26
@Auth ： 张张呀
@File ：百度_seo.py
@IDE ：PyCharm
@Motto：ABC(Always Be Coding)

"""
import re
import time
import urllib

import requests
import xlwt
from lxml import etree

# Shared HTTP headers for every chinaz.com request.
# NOTE(review): the Cookie is a captured session value and may expire — refresh it
# if the site starts returning empty/blocked pages.
headers = {
    'Cookie': 'qHistory=aHR0cDovL3N0b29sLmNoaW5hei5jb20vYmFpZHUvd29yZHMuYXNweCvnmb7luqblhbPplK7or43mjJbmjpg=; Hm_lvt_ca96c3507ee04e182fb6d097cb2a1a4c=1606810942; UM_distinctid=1761d653c765ec-0c11df9176f618-c791e37-1fa400-1761d653c77d7d; CNZZDATA5082706=cnzz_eid%3D1823973152-1606806375-null%26ntime%3D1606806375; Hm_lpvt_ca96c3507ee04e182fb6d097cb2a1a4c=1606810948',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
    # Bug fix: the value previously began with a duplicated "Accept: " prefix,
    # which sends a malformed header value to the server.
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
    "Connection": "keep-alive",
    "Accept-Encoding": "gzip, deflate, br",
    "Accept-Language": "zh-CN,zh;q=0.9"
}


# 查询关键词是否能找到相关的关键字
def search_keyword(keyword):
    """Check whether chinaz finds any long-tail data for *keyword*.

    Returns True when data appears to exist, False when the site shows the
    "抱歉，未找到相关的数据。" (no related data found) notice.
    """
    data = {
        'kw': keyword,
    }
    url = "http://stool.chinaz.com/baidu/words.aspx"
    html = requests.post(url, data=data, headers=headers).text
    con = etree.HTML(html)
    # The notice <div> only renders when the query has no results, so an
    # empty xpath result means data exists.
    key_result = con.xpath('//*[@id="pagedivid"]/li/div/text()')
    # Narrowed from a bare `except:` — only the empty-list IndexError is an
    # expected outcome here; anything else should surface.
    try:
        if key_result[0] == "抱歉，未找到相关的数据。":
            print('\n')
            print("没有找到相关长尾词，请换一个再试。")
            return False
    except IndexError:
        pass
    # Bug fix: the original implicitly returned None (falsy) when the notice
    # element existed but held different text, making main() silently exit.
    return True


# 获取关键词页码数和记录条数
def get_page_number(keyword):
    """Return the total page count (as a string) of *keyword*'s long-tail index.

    Raises ValueError when the count cannot be parsed from the page, e.g.
    because the layout changed or the request was blocked.
    """
    url = "https://data.chinaz.com/keyword/allindex/" + keyword
    html = requests.get(url, headers=headers).text
    # re.search instead of findall(...)[0]: fail with a clear message rather
    # than an opaque IndexError when the pattern is absent.
    match = re.search(r'共(.*?)页', html)
    if match is None:
        raise ValueError(f'无法从页面中解析页码，关键词：{keyword}')
    # Kept as a string: the caller concatenates it into a message before
    # converting to int.
    page_number = match.group(1)
    print(page_number)
    print('准备开始挖掘关键词' + keyword + '的长尾关键词')
    return page_number


# 获取关键词数据
def get_keyword_datas(keyword, page_number):
    """Scrape *page_number* pages of long-tail keyword metrics for *keyword*.

    Returns a list of rows, each:
    [关键词, 全网指数, 长尾词数, 竞价数, 收录量, 竞争度, pc日均流量, 移动日均流量, sem价格]
    """
    keyword = urllib.parse.quote(keyword)
    datas_list = []
    for i in range(1, page_number + 1):
        print(f'正在采集第{i}页关键词{keyword}挖掘数据...')
        print('\n')
        url = f"https://data.chinaz.com/keyword/allindex/{keyword}/{i}"
        print(url)
        html = requests.get(url, headers=headers).text
        con = etree.HTML(html)
        key_words = con.xpath('//*[@id="pagedivid"]/ul/li[2]/a/text()')  # 关键词
        overall_indexs = con.xpath('//*[@id="pagedivid"]/ul/li[6]/a/text()')  # 全网指数
        chagnwei_indexs = con.xpath('//*[@id="pagedivid"]/ul/li[4]/a/text()')  # 长尾词数
        jingjias = con.xpath('//*[@id="pagedivid"]/ul/li[5]/a/text()')  # 竞价数
        collections = con.xpath('//*[@id="pagedivid"]/ul/li[3]/text()')  # 收录量
        # Bug fix: the li index here was corrupted to non-numeric text in the
        # original source (an invalid XPath that raises at runtime). Indices
        # 2-9 are used by the other columns, so 10 is the reconstruction —
        # TODO confirm against the live page markup.
        jingzhengs = con.xpath('//*[@id="pagedivid"]/ul/li[10]/text()')  # 竞争度
        pclius = con.xpath('//*[@id="pagedivid"]/ul/li[8]/text()')  # pc日均流量
        mobilelius = con.xpath('//*[@id="pagedivid"]/ul/li[9]/text()')  # 移动日均流量
        semjias = con.xpath('//*[@id="pagedivid"]/ul/li[7]/text()')  # sem价格
        # zip truncates to the shortest column, silently dropping ragged rows
        # — same behavior as the original loop.
        data_list = [
            list(row)
            for row in zip(key_words, overall_indexs, chagnwei_indexs, jingjias,
                           collections, jingzhengs, pclius, mobilelius, semjias)
        ]
        for data in data_list:
            print(data)
        datas_list.extend(data_list)  # 合并关键词数据
        print('拼装后：')
        print(datas_list)
        print('\n')
        # Throttle requests to avoid being rate-limited/blocked.
        time.sleep(3)
    return datas_list


# 保存关键词数据为excel格式
def bcsj(keyword, data):
    """Save the scraped keyword rows to <keyword>.xls with a header row."""
    header = ['关键词', '全网指数', '长尾词数', '竞价数', '收录量', '竞争度', 'pc日均流量', '移动日均流量', 'sem价格']
    workbook = xlwt.Workbook(encoding='utf-8')
    sheet = workbook.add_sheet('Sheet 1', cell_overwrite_ok=True)
    # Row 0 is the header; the data rows follow in order.
    for row_idx, row in enumerate([header] + list(data)):
        for col_idx, value in enumerate(row):
            sheet.write(row_idx, col_idx, value)
    workbook.save(f'{keyword}.xls')
    print(f"保存数据为 {keyword}.xls 成功！")


if __name__ == '__main__':
    keyword = input('请输入关键词>>')
    print('正在查询，请稍后...')
    result = search_keyword(keyword)
    if result:
        page_number = get_page_number(keyword)
        print('一共找到' + page_number + '页')
        input_page_num = input('请输入你想采集的页数>>')
        # Clamp to the number of pages that actually exist — the original
        # if/else was exactly min().
        page_number = min(int(input_page_num), int(page_number))
        print('\n')
        print('正在采集关键词挖掘数据，请稍后...')
        print('\n')
        datas_list = get_keyword_datas(keyword, page_number)
        print('\n========================采集结束========================\n')
        bcsj(keyword, datas_list)
