# Web crawler
import os.path
import  json
def saveWebTxt(str, path=None):
    """Append the non-empty lines of a page's text to the output file.

    Collapses blank lines, drops whitespace-only lines, and appends the
    remainder (one per line) to *path*, or to the module-level ``txt_dir``
    when *path* is None.

    :param str: page text to save (parameter name kept for caller
                compatibility; it shadows the builtin, so alias it locally)
    :param path: optional output file path; defaults to ``txt_dir``
    """
    text = str
    target = txt_dir if path is None else path
    # "with" guarantees the handle is closed even if a write raises
    with open(target, "a", encoding="utf-8") as fw:
        for line in text.replace("\n\n", "\n").split("\n"):
            if line.strip():
                fw.write(line + "\n")

import requests
from requests.packages import urllib3
from  bs4 import BeautifulSoup
# Requests are made with verify=False below; silence the resulting
# InsecureRequestWarning noise from urllib3 and the warnings module.
urllib3.disable_warnings()
import logging
logging.captureWarnings(True)

# Directory where the crawled text and the crawl history are stored.
save_dir = "../webs/"
os.makedirs(save_dir,exist_ok=True)

# Output file for extracted page text and the persisted visited-URL list.
txt_dir = os.path.join(save_dir,"fuck3.txt")
history_dir = os.path.join(save_dir,"history1.txt")
# URLs already crawled; seeded from the history file in __main__ so a
# restarted run does not re-fetch pages.
url_history = set()

# Do not append a trailing "/" to the URLs below.
url_abs = "https://www.myanmar-now.org"
url_start ="https://www.myanmar-now.org/mm"

# NOTE(review): "global" at module level is a no-op; ii is the count of
# pages saved so far and is mutated inside getTxt via "global ii".
global ii
ii = 0
def getTxt(url_start):
    """Recursively crawl pages starting at *url_start*.

    Fetches the page, appends its visible text to the output file via
    ``saveWebTxt``, collects same-site article links, recurses into each,
    and finally appends the discovered links to the history file.

    Side effects: mutates the module-level ``url_history`` set and ``ii``
    page counter, writes to ``txt_dir`` and ``history_dir``, prints progress.

    :param url_start: absolute URL of the page to crawl
    """
    global ii
    # Hard cap on the number of pages saved in one run.
    if ii > 100000:
        return
    # Skip pages already visited in this run or recorded in a prior run.
    if url_start in url_history:
        return
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Chrome/51.0.2704.63 Safari/537.36'}
    try:
        r = requests.get(url_start, headers=headers, verify=False, timeout=5)
        # Mark as visited even when the status turns out to be an error,
        # so failing URLs are not retried over and over.
        url_history.add(url_start)
        r.raise_for_status()
    except requests.exceptions.RequestException as e:
        # HTTPError is a subclass of RequestException, so one handler
        # covers both of the original's identical branches.
        print(e)
        return
    if r.status_code != 200:
        return
    r.encoding = r.apparent_encoding
    if not r.text.strip():
        return
    soup = BeautifulSoup(r.text, 'html.parser')

    print("save txt ", url_start)
    saveWebTxt(soup.text)
    ii = ii + 1

    # Collect candidate links, deduplicated in O(1) via a set while the
    # list preserves discovery order.
    urls_page = []
    seen = set()
    blocked = ("en", "facebook", "youtube", "twitter", "instagram")
    for link in soup.find_all('a'):
        href = link.get('href')
        if href is None:
            continue
        # Custom filter rules. NOTE(review): these are plain substring
        # matches, so "en" also rejects hrefs like "/content/..." — kept
        # as in the original.
        if any(word in href for word in blocked):
            continue
        if "http" in href:
            url = href
            # Absolute links must point at a Burmese-language section.
            if ("/my" not in url) and ("/bu" not in url) and ("/mm" not in url):
                continue
        else:
            # Relative link: resolve against the site root.
            url = url_abs + href
        if url not in seen:
            seen.add(url)
            urls_page.append(url)
    # print(urls_page)

    # Recurse depth-first into every discovered link, in discovery order.
    for current_url in urls_page:
        getTxt(current_url)
    # Persist the discovered links; "with" closes the file even on error.
    with open(history_dir, "a", encoding="utf-8") as f_history1:
        for url11 in urls_page:
            f_history1.write(url11 + "\n")
    print("save history")
    return

if __name__ == "__main__":
    # Seed url_history with URLs recorded by previous runs so pages are
    # not re-crawled. The history file does not exist on the very first
    # run, so guard against that instead of crashing.
    if os.path.exists(history_dir):
        with open(history_dir, "r", encoding="utf-8") as f_history:
            for line in f_history:
                url_history.add(line.strip("\n"))
    getTxt(url_start)