# 1. requests — send HTTP requests and fetch raw data from the server
# 2. BeautifulSoup — parse the fetched page's HTML source

import requests
from bs4 import BeautifulSoup
import re
import queue
import os
import BloomFilter


# Send a request to the server

# def print_hi(name):
#     # Use a breakpoint in the code line below to debug your script.
#     print(f'Hi, {name}')  # Press Ctrl+F8 to toggle the breakpoint.


def GetURL(name, url_queue=None):
    """Fetch page *name* and print every ``target="_blank"`` link found
    inside the page's ``div.main`` container.

    For each anchor this prints a running index, the Chinese characters of
    the anchor markup, and the href.  Absolute ``http(s)`` links are pushed
    onto *url_queue* when one is supplied.

    :param name: URL of the page to fetch.
    :param url_queue: optional ``queue.Queue`` that receives absolute links.
        (The original pushed to an undefined global ``q`` — a NameError as
        soon as any link started with ``http``; the explicit parameter,
        defaulting to ``None``, fixes that without breaking callers.)
    """
    # timeout: never hang indefinitely on a dead or slow host
    resp = requests.get(name, timeout=10)
    resp.encoding = 'utf-8'

    # Parse the HTML and locate the main content container.
    main_page = BeautifulSoup(resp.text, "html.parser")
    container = main_page.find("div", attrs={"class": "main"})
    if container is None:
        # Page layout differs from expectation — nothing to index.
        return
    a_lst = container.find_all("a", attrs={"target": "_blank"})

    # CJK Unified Ideographs range — extracts the Chinese title text.
    han_re = re.compile(r'[\u4e00-\u9fa5]')
    for i, a in enumerate(a_lst, start=1):
        title = ''.join(han_re.findall(str(a)))
        href = a.get("href")  # may be None for anchors without an href
        if href and href.startswith('http') and url_queue is not None:
            url_queue.put(href)
        # Print index, title and URL (href may be None; original crashed here).
        print(i, title, href)


def init():
    """Seed and drive the crawl.

    Reads seed URLs from ``../seeds.txt`` (one per line), deduplicates them
    with a Bloom filter, then breadth-first crawls by repeatedly handing
    queued URLs to :func:`getAllUrl` until the queue drains or roughly ``n``
    URLs have been indexed.
    """
    # Bloom filter sized for ~10k URLs at a 1% false-positive rate.
    n = 10000
    p = 0.01
    bf = BloomFilter.BloomFilter(n, p)

    # Load the seed URLs.  NOTE(review): expanduser is a no-op on a path
    # without '~'; kept for compatibility with the original behavior.
    seeds_path = os.path.expanduser('../seeds.txt')
    q = queue.Queue()
    i = 1
    with open(seeds_path, 'r', encoding='UTF-8') as f:
        for line in f:
            seed = line.rstrip('\n')  # drop the trailing newline
            if not bf.contains(seed):
                q.put(seed)
                bf.put(seed)

    # BFS over the URL queue; stop a little before the filter's capacity.
    while not q.empty() and i < n - 3:
        url = q.get()
        print(url)
        try:
            i = getAllUrl(i, q, url, bf)
        except Exception as e:
            # Best-effort crawl: one dead/malformed link must not abort
            # the whole run (the original died on the first bad fetch).
            print('failed to fetch', url, ':', e)


def getAllUrl(i, q, url, bf):
    """Fetch *url*, enqueue every not-yet-seen absolute link that carries a
    Chinese title, and print ``<index> <title> <href>`` for each.

    :param i: running output index; the updated value is returned.
    :param q: ``queue.Queue`` of URLs still to crawl.
    :param url: page to fetch.
    :param bf: Bloom filter used to skip already-seen URLs.
    :return: the updated running index ``i``.
    """
    # timeout: never hang indefinitely on a dead or slow host
    resp = requests.get(url, timeout=10)
    resp.encoding = 'utf-8'

    soup = BeautifulSoup(resp.text, "html.parser")
    # CJK Unified Ideographs range; compiled once, hoisted out of the loop.
    han_re = re.compile(r'[\u4e00-\u9fa5]')
    for tag in soup.find_all('a'):
        title = ''.join(han_re.findall(str(tag)))
        href = tag.get('href')
        if href is None:
            # Anchor without an href (the original turned these into the
            # literal string "None", which the http prefix check rejected).
            continue
        m = str(href).strip()
        # Keep only absolute http(s) links that have a Chinese title
        # and were not seen before.
        if m.startswith('http') and title and not bf.contains(m):
            q.put(m)
            bf.put(m)
            # Index, title and URL (reuse m instead of recomputing it).
            print(i, title, m)
            i = i + 1
    return i


# Script entry point: build the seed queue and start crawling.
if __name__ == '__main__':
    init()
