# coding:utf-8
import requests
import re
import time
from DBCenter import Mongo_DB
from Browser import RequestInfo


def get_content_str(list_a):
    """Join the HTML fragments in *list_a* and strip markup.

    Everything between ``<`` and ``>`` (inclusive) is discarded, stray
    ``>`` characters are dropped, and ``&nbsp;`` entities are removed.

    Fixes vs. the previous version: removed a dead ``str_a = ''``
    assignment, replaced quadratic ``+=`` string building with a list
    join, and removed the full ``&nbsp;`` entity (the old pattern
    ``'&nbsp'`` left a stray ``;`` behind).
    """
    kept = []
    in_tag = False
    for ch in ''.join(list_a):
        if ch == '<':
            in_tag = True
        elif ch == '>':
            # Closes a tag; a stray '>' is simply dropped, as before.
            in_tag = False
        elif not in_tag:
            kept.append(ch)
    # Strip complete entities first, then any truncated remainder.
    return ''.join(kept).replace('&nbsp;', '').replace('&nbsp', '')


def get_str(list_a):
    """Join the HTML fragments in *list_a*, strip markup, and truncate.

    Tags (``<...>``) and ``&nbsp;`` entities are removed exactly as in
    :func:`get_content_str`; the result is then cut at the first ``-``
    (Baidu titles look like ``"title - site name"``).

    Fixes vs. the previous version: removed a dead ``str_a = ''``
    assignment and commented-out code, replaced quadratic ``+=`` string
    building, removed the full ``&nbsp;`` entity (the old ``'&nbsp'``
    pattern left a stray ``;``), and used ``str.partition`` for the
    truncation loop.
    """
    kept = []
    in_tag = False
    for ch in ''.join(list_a):
        if ch == '<':
            in_tag = True
        elif ch == '>':
            in_tag = False
        elif not in_tag:
            kept.append(ch)
    cleaned = ''.join(kept).replace('&nbsp;', '').replace('&nbsp', '')
    # Keep only the part before the first '-' (whole string if absent).
    return cleaned.partition('-')[0]


class Baidu(object):
    """Baidu search-result crawler.

    Searches Baidu for a unique identification code, records the page and
    rank of every snapshot whose abstract contains the code, then
    re-searches each matched Chinese title to record its own ranking.

    Fixes vs. the previous version: bare ``except:`` narrowed to
    ``except Exception`` (no longer swallows SystemExit /
    KeyboardInterrupt), unused locals (``word_baidu``, an unused
    ``postion`` counter) and dead commented-out code removed, stray
    semicolons dropped. All runtime strings, regexes, return sentinels
    and Mongo document shapes are unchanged.
    """

    def __init__(self):
        """Initialise per-run counters and page bookkeeping."""
        # Running snapshot index for the unique-code search (used as rank).
        self.baidu_snapshot = 0
        # Running snapshot index for the follow-up Chinese-title search.
        # NOTE(review): starts at 1 while baidu_snapshot starts at 0 —
        # preserved as-is; confirm whether this off-by-one is intended.
        self.baidu_snapshot_2 = 1
        # Total result count Baidu reports ("约 N 个").
        self.search_mun = 0
        self.save_num_2 = 0
        # Number of entries checked.
        self.check_num = 0
        # Number of entries stored.
        self.save_num = 0
        # Result pages still to crawl.
        self.check_page = []
        # Pages whose crawl failed twice.
        self.failed_page = []

    def get_baidu_html(self, word=None, page=1):
        """Fetch one Baidu result page for *word* and store matching hits.

        The last space-separated token of *word* is taken as the unique
        identification code and is the only term actually searched
        (Baidu's ranking works better that way). Hits whose abstract
        contains the code are saved via ``Mongo_DB.save``.

        Returns '有下一页' or '没下一页' depending on the pager, or
        '爬取失败' when the page could not be parsed.
        """
        unique_code = word.split(' ')[-1]
        # Search only the unique code, per Baidu's optimised rules.
        word = unique_code

        url, headers, data = RequestInfo.get_baidu_search_header(word=word, page=page)
        response = requests.get(url=url, headers=headers)

        # Collapse all whitespace so the regexes below match across lines.
        html = response.text.replace(' ', '').replace('\n', '').replace('\t', '')

        if page == 1:
            # Total result count Baidu claims for this query.
            self.search_mun = int(
                re.findall("""百度为您找到相关结果约(.*?)个""", html)[0].replace(',', ''))
            print(self.search_mun)
            # Over-shoot by a page or two so a partial last page is covered.
            if self.search_mun % 10 > 0:
                num = self.search_mun + 20
            else:
                num = self.search_mun + 10
            # Pages 2..99 left to crawl (page 1 is this very request).
            self.check_page = [i for i in range(0, num // 10) if 1 < i < 100]
            print("当前条目在百度上检索到%s条" % self.search_mun)

        all_the_html_banner = re.findall(
            """<divclass="resultc-container"id="(.*?)百度快照</a></div></div>""", html)
        saving = []

        for banner in all_the_html_banner:
            self.baidu_snapshot += 1

            # Abstract text of the hit; skip it unless it carries the code.
            content = get_content_str(
                re.findall('<divclass="c-abstract">(.*?)</div>', banner))
            if word not in content:
                continue

            # Result title.
            title = get_str(re.findall('target="_blank">(.*?)</a>', banner))

            print(word + '当前是第' + str(self.baidu_snapshot) + '个百度快照' + 'page：' + str(page))
            print('题目是：' + title)
            # Re-search the Chinese title to record its own ranking.
            self.baidu_search_Chinese(word=title, unique_code=unique_code)

            self.save_num += 1
            saving.append({unique_code: title, 'page': str(page),
                           "ranking": str(self.baidu_snapshot)})

        Mongo_DB.save(saving)

        # Decide whether a following result page exists.
        try:
            if "下一页" in re.findall("""<divid="page">(.*?)</div>""", html)[0]:
                print('有下一页')
                return '有下一页'
            else:
                print('没下一页')
                return '没下一页'
        except Exception:  # IndexError when the pager block is missing
            return "爬取失败"

    def start_baidu_search_unique_code(self, word=None):
        """Crawl every Baidu result page for *word*.

        Fetches page 1, then each page in ``self.check_page``; a page
        that fails is retried once before being recorded in
        ``self.failed_page``. The number of stored snapshots is saved
        via ``Mongo_DB.save_tasks``.
        """
        if not word:
            print('未填写搜索词')
            return '未填写搜索词'

        next_page = self.get_baidu_html(word=word)
        if next_page == "没下一页":
            self.check_page = []
        print('self.check_page', self.check_page)
        print("", self.check_page)

        for page in self.check_page:
            next_page = self.get_baidu_html(word=word, page=page)
            if next_page == '没下一页':
                break
            if next_page == '爬取失败':
                # Retry a failed page once before giving up on it.
                next_page = self.get_baidu_html(word=word, page=page)
                if next_page == '没下一页':
                    break
                if next_page == '爬取失败':
                    self.failed_page.append(page)

        # Record how many snapshots were stored for this word.
        Mongo_DB.save_tasks({word: self.save_num})

    def baidu_search_Chinese(self, word=None, unique_code=None, page=1):
        """Search Baidu for the Chinese title *word*.

        Stores every hit whose abstract contains *unique_code* via
        ``Mongo_DB.save_to_data_has``, with its per-page position as
        the ranking.

        Returns '有下一页' / '没下一页' / '爬取失败' exactly like
        :meth:`get_baidu_html`.
        """
        url, headers, data = RequestInfo.get_baidu_search_header(word=word, page=page)
        print(url, headers)
        response = requests.get(url=url, headers=headers)

        # Collapse all whitespace so the regexes below match across lines.
        html = response.text.replace(' ', '').replace('\n', '').replace('\t', '')

        if page == 1:
            # Total result count Baidu claims for this query.
            self.search_mun = int(
                re.findall("""百度为您找到相关结果约(.*?)个""", html)[0].replace(',', ''))
            print(self.search_mun)
            # Over-shoot by a page or two so a partial last page is covered.
            if self.search_mun % 10 > 0:
                num = self.search_mun + 20
            else:
                num = self.search_mun + 10
            self.check_page = [i for i in range(0, num // 10) if 1 < i < 100]
            print("当前条目在百度上检索到%s条" % self.search_mun)

        all_the_html_banner = re.findall(
            """<divclass="resultc-container"id="(.*?)百度快照</a></div></div>""", html)
        postion = 0
        saving = []

        for banner in all_the_html_banner:
            postion += 1
            self.baidu_snapshot_2 += 1

            # Abstract text of the hit; skip unless it carries the code.
            content = get_content_str(
                re.findall('<divclass="c-abstract">(.*?)</div>', banner))
            if unique_code not in content:
                continue

            # Result title.
            title = get_str(re.findall('target="_blank">(.*?)</a>', banner))

            print(word + '当前是第' + str(self.baidu_snapshot_2) + '个百度快照' + 'page：' + str(page))
            print('题目是：' + title)

            self.save_num_2 += 1
            saving.append({'chinese': title, 'unique': unique_code,
                           'page': str(page), "ranking": postion})

        Mongo_DB.save_to_data_has(saving)

        # Decide whether a following result page exists.
        try:
            if "下一页" in re.findall("""<divid="page">(.*?)</div>""", html)[0]:
                print('有下一页')
                return '有下一页'
            else:
                print('没下一页')
                return '没下一页'
        except Exception:  # IndexError when the pager block is missing
            return "爬取失败"


if __name__ == '__main__':
    # Ad-hoc manual run: rank one known title / unique-code pair.
    spider = Baidu()
    spider.baidu_search_Chinese(word='优质的圆球吸顶灯', unique_code='DzJfsXIO', page=1)

# if __name__ == '__main__':
#     # for i in range(1000):
#     s = '59p5eN5f'.replace('"', '')
#     nextpage = Baidu.get_baidu_html(s)
#
#     page = 2
#     while 1:
#         if nextpage == '有下一页':
#             nextpage = Baidu.get_baidu_html(s, page=page)
#             page += 1
#             time.sleep(1)
#             print("fuck", nextpage)
#         elif page == 5:
#             break
#         else:
#             break
