import csv
import requests
from bs4 import BeautifulSoup
import re
import queue
import os
import BloomFilter
import threading
import random
import traceback
import jieba
from typing import List


def get_proxies_Https():
    """Pick a random HTTPS proxy from ``Https.txt``.

    The file holds one ``host:port`` entry per line.  Returns a dict
    suitable for the ``proxies=`` argument of ``requests.get``.

    Note: the original sliced off the last character with ``[:-1]``,
    which corrupted the port whenever the chosen line had no trailing
    newline; stripping whitespace fixes that and also skips blank lines.
    """
    with open('Https.txt', 'r') as f:
        entries = [line.strip() for line in f if line.strip()]
    parts = random.choice(entries).split(':')
    return {
        'https': 'https://{}:{}'.format(parts[0], parts[1])
    }

# (blank line kept between top-level definitions)


def get_proxies_Http():
    """Pick a random HTTP proxy from ``IPPool.txt``.

    The file holds one ``host:port`` entry per line.  Returns a dict
    suitable for the ``proxies=`` argument of ``requests.get``.

    Note: the original sliced off the last character with ``[:-1]``,
    which corrupted the port whenever the chosen line had no trailing
    newline; stripping whitespace fixes that and also skips blank lines.
    """
    with open('IPPool.txt', 'r') as f:
        entries = [line.strip() for line in f if line.strip()]
    parts = random.choice(entries).split(':')
    return {
        'http': 'http://{}:{}'.format(parts[0], parts[1])
    }


class Qsbk(object):
    """A small breadth-first web crawler.

    Starting from the URLs listed in ``seeds.txt``, it follows every
    ``<a href="http...">`` link whose markup contains Chinese characters,
    assigning each newly-seen URL an incrementing document id.  Output:

    * ``IDURL.csv``     - rows of (doc id, URL)
    * ``IDCONTENT.csv`` - rows of (doc id, Chinese link text)
    * ``wordSeg.csv``   - inverted index: token followed by the ids of
      the documents whose title contains that token
    """

    def __init__(self):
        # Next document id to assign; also counts pages indexed so far.
        self.i = 1
        # FIFO frontier of URLs still to be crawled.
        self.q = queue.Queue()

        # Bloom filter for URL de-duplication (project-local module).
        self.n = 100000  # expected number of distinct URLs to crawl
        self.p = 0.01    # acceptable false-positive rate
        self.bf = BloomFilter.BloomFilter(self.n, self.p)

        # Output files.  NOTE: gbk cannot encode every character that may
        # appear in a URL or title, hence the UnicodeEncodeError handling
        # around the writerow calls below.
        self.index = open("IDURL.csv", 'a', encoding='gbk')
        self.content = open("IDCONTENT.csv", 'a', encoding='gbk')
        self.keyWord = open("wordSeg.csv", 'a', encoding='gbk')

        # Desktop-browser User-Agent so servers don't reject the crawler
        # outright.  (Proxy rotation is available through the module-level
        # get_proxies_Http / get_proxies_Https helpers if needed.)
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/70.0.3538.77 Safari/537.36 '
        }

        # Inverted index: token -> list of doc ids whose title contains it.
        self.wordSeg = {}

    def run(self):
        """Seed the frontier from ``./seeds.txt``, crawl, dump the index.

        Returns 0 on completion.  Always closes the three output files
        opened by ``__init__`` (the original leaked these handles).
        """
        seeds_path = os.path.expanduser('./seeds.txt')
        with open(seeds_path, 'r', encoding='UTF-8') as f:
            for line in f:
                seed = line.rstrip('\n')
                # Skip blank lines and seeds that are already queued.
                if seed and not self.bf.contains(seed):
                    self.q.put(seed)
                    self.bf.put(seed)

        try:
            # Single-threaded crawl of the whole frontier.
            self.getAllUrl()

            # Dump the inverted index: one row per token, the token
            # followed by every doc id whose title contains it.
            writer = csv.writer(self.keyWord, lineterminator='\n')
            for token, doc_ids in self.wordSeg.items():
                try:
                    writer.writerow([token] + list(doc_ids))
                except UnicodeEncodeError:
                    # Token not representable in the gbk output encoding.
                    continue
        finally:
            self.index.close()
            self.content.close()
            self.keyWord.close()
        return 0

    def WordSeg(self, str1):
        """Tokenize *str1* with jieba (search-engine mode) and record the
        current doc id ``self.i`` in the inverted index for each token.

        Returns 0 (kept for compatibility with the original interface).
        """
        for seg in jieba.cut_for_search(str1):
            self.wordSeg.setdefault(seg, []).append(self.i)
        return 0

    def getAllUrl(self):
        """Crawl every URL in the frontier, breadth-first.

        For each new link found, writes an (id, URL) row to IDURL.csv and
        an (id, title) row to IDCONTENT.csv, feeds the title into the
        inverted index, and enqueues the link for further crawling.
        Returns the next unused doc id.
        """
        idUrl = csv.writer(self.index, lineterminator='\n')
        idContent = csv.writer(self.content, lineterminator='\n')
        # CJK ideograph matcher; compiled once instead of per-link.
        han_re = re.compile(r'[\u4e00-\u9fa5]')
        # NOTE(review): the original stopped at n - 3; presumably a safety
        # margin against the bloom-filter capacity -- confirm intent.
        while not self.q.empty() and self.i < self.n - 3:
            url = self.q.get()
            try:
                # Timeout added so one dead host cannot hang the crawl;
                # headers were defined but unused in the original request.
                resp = requests.get(url, headers=self.headers, timeout=10)
            except requests.RequestException:
                # Unreachable host, malformed URL, timeout, ... skip it.
                continue
            resp.encoding = 'utf-8'
            soup = BeautifulSoup(resp.text, "html.parser")
            for tag in soup.find_all('a'):
                title = ''.join(han_re.findall(str(tag)))
                href = str(tag.get('href')).strip()
                # Keep only absolute http(s) links whose markup carries
                # at least one Chinese character.
                if not (href.startswith('http') and title):
                    continue
                if self.bf.contains(href):
                    continue
                self.q.put(href)
                self.bf.put(href)
                try:
                    idUrl.writerow([self.i, href])
                except UnicodeEncodeError:
                    # URL not representable in the gbk output file.
                    continue
                idContent.writerow([self.i, title])
                # Progress report: id, title, URL.
                print(self.i, title, href)
                self.WordSeg(title)
                self.i = self.i + 1
        return self.i


if __name__ == '__main__':
    # Entry point: build the crawler and run it to completion.
    crawler = Qsbk()
    crawler.run()
