from requests_html import HTMLSession
import urllib.parse
import requests
import time
import json
import random
import sys
import re
import os
import datetime

# Output file for the raw (pre-verification) download links.
RAW = "./Raw.md"
# Output file for the verified, working download links (markdown list).
README = "./README.md"
# Landing page of the catalogue site being crawled.
URL = "https://freeitebooks.com/"
# Extracts the page number from a pagination URL.
URL_MATCH = r"https://freeitebooks.com/page/(\d+)/"
# Template for a numbered listing page.
URL_PAGE = "https://freeitebooks.com/page/{}/"
# Mirror download URL template: {0} = date folder (YYYYMMDD), {1} = file name.
URL_DW = "https://itebooksfree.com/content/itebooksfree/files/{0}/{1}"
# Parses the original file-host URL into (date folder, file name) groups.
URL_DPR = r"http://file.allitebooks.com/(\d+)/(.*)"
# Pool of user-agent strings; one is chosen at random per request to make
# the crawler look less like a single automated client.
USER_AGENTS = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; 360SE)",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E)",
    "Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; zh-cn) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
    "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:16.0) Gecko/20100101 Firefox/16.0",
    "Mozilla/5.0 (X11; U; Linux x86_64; zh-CN; rv:1.9.2.10) Gecko/20100922 Ubuntu/10.10 (maverick) Firefox/3.6.10",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 LBBROWSER",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.89 Safari/537.1",
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0b13pre) Gecko/20110307 Firefox/4.0b13pre",
    "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.82 Safari/537.36 Edg/89.0.774.50"
]
# Base request headers shared by every request; "user-agent" is injected
# at call time from USER_AGENTS.
# NOTE(review): the "cookie" value embeds a captured Cloudflare clearance
# token (cf_clearance / __cfduid) tied to one session — it has likely
# expired and probably needs to be refreshed before the crawler will work.
HEADERS = {
    "scheme": "https",
    "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    "accept-encoding": "gzip, deflate, br",
    "accept-language": "zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6,zh-TW;q=0.5,cy;q=0.4,ca;q=0.3",
    "cookie": "__cfduid=d422d8a3764c639c6c9211aba0d4acb2b1615622676; _ga=GA1.2.1297672438.1615622676; _gid=GA1.2.1785149186.1615622676; __tawkuuid=e::itebooksfree.com::/+Y+Ke57NDiDx8gdRvH2FAtY7Er/WFn0o6dT53u/tWCVX9anrNjcMaUNUGnurUmV::2; cf_chl_2=053ff7a9e20eab7; cf_chl_prog=x17; cf_clearance=97d809bcbca1764abf6c4f2cb61bcdad588938b3-1615736684-0-150",
    "dnt": "1",
    "cache-control": "max-age=0",
    "sec-fetch-dest": "document",
    "sec-fetch-mode": "navigate",
    "sec-fetch-site": "none",
    "sec-fetch-user": "?1",
    "upgrade-insecure-requests": "1"
}
# Maximum number of consecutive dates to probe for a working download URL.
MAX_TRIES = 10
# Download stream chunk size, in bytes.
CHUNK_SIZE = 1024
# Width of the console progress bar, in characters.
PROGRESS_LEN = 50
# Directory where downloaded books are saved.
SAVE_AT = "./books"
# Shared requests-html session, reused for all page fetches and link probes.
session = HTMLSession()


def clear_urls():
    """Reset the README links file to just its markdown title."""
    with open(README, "w") as out:
        out.write("# itebooksfree-links\n\n")


def add_to_urls(link):
    """Append a verified download *link* to README as a markdown list item."""
    print("正在添加书籍链接： %s" % link)
    entry = "- [%s](%s)\n" % (get_filename_from_link(link), link)
    with open(README, "a") as out:
        out.write(entry)


def clear_raw():
    """Reset the raw links file to just its markdown title."""
    with open(RAW, "w") as out:
        out.write("# itebooksfree-raw-links\n\n")


def add_to_raw(link):
    """Append an unverified source *link* to the raw file as a markdown item."""
    entry = "- [%s](%s)\n" % (get_filename_from_link(link), link)
    with open(RAW, "a") as out:
        out.write(entry)


def get_filename_from_link(link):
    """Return the URL-decoded final path segment of *link* as a filename."""
    last_segment = link.rsplit("/", 1)[-1]
    return urllib.parse.unquote_plus(last_segment)


def get_next_date(p_date):
    """Given a date packed as an int YYYYMMDD, return the next day, same packing."""
    year, remainder = divmod(p_date, 10000)
    month, day = divmod(remainder, 100)
    following = datetime.date(year, month, day) + datetime.timedelta(days=1)
    return following.year * 10000 + following.month * 100 + following.day


def download_file(link):
    """Download *link* into SAVE_AT when it points at a .pdf or .epub file.

    Renders an '='-bar progress indicator on stdout while streaming. On any
    failure (HTTP error, missing content-length, network exception) a partial
    file, if one was created, is removed.
    """
    filename = get_filename_from_link(link)
    if ".pdf" not in link and ".epub" not in link:
        return
    os.makedirs(SAVE_AT, exist_ok=True)
    filepath = os.path.join(SAVE_AT, filename)
    ok = False
    print("正在下载 %s" % filename)
    HEADERS["user-agent"] = random.choice(USER_AGENTS)
    try:
        # stream=True fetches the body chunk by chunk; the context manager
        # closes the connection, and the timeout prevents a stalled server
        # from hanging the crawler forever.
        with requests.get(link, stream=True, headers=HEADERS, timeout=30) as response:
            total = response.headers.get("content-length")
            # Only write the file on a 200 with a known size, so HTTP error
            # pages are never saved as books.
            if response.status_code == 200 and total is not None:
                total = int(total)
                downloaded = 0
                with open(filepath, "wb") as f:
                    for data in response.iter_content(chunk_size=CHUNK_SIZE):
                        downloaded += len(data)
                        f.write(data)
                        done = int(PROGRESS_LEN * downloaded / total)
                        params = ("=" * done, " " * (PROGRESS_LEN - done))
                        sys.stdout.write("\r[%s%s]" % params)
                        sys.stdout.flush()
                print("%s 下载完成！" % filename)
                ok = True
    except requests.RequestException as e:
        # Network-level failure; fall through to the cleanup below.
        print(e)
    if not ok:
        print("下载失败: %s" % link)
        # The file exists only if writing had already started.
        if os.path.exists(filepath):
            os.remove(filepath)


def is_valid_link(link):
    """Probe *link* without following redirects.

    Returns a (is_ok, status_code) tuple where is_ok is True only for a
    plain HTTP 200 response.
    """
    HEADERS["user-agent"] = random.choice(USER_AGENTS)
    response = session.get(link, headers=HEADERS,
                           timeout=10, allow_redirects=False)
    code = response.status_code
    return (code == 200, code)


def handle_book_link(p_date, p_book, tries=0):
    """Find a working mirror URL for *p_book*.

    Builds the download URL from the date folder *p_date* (YYYYMMDD) and
    probes it; on failure, advances the date one day at a time, up to
    MAX_TRIES attempts in total, recording the first link that answers 200.
    """
    try:
        current_date = int(p_date)
        while tries < MAX_TRIES:
            link = URL_DW.format(current_date, p_book)
            status, code = is_valid_link(link)
            print("正在处理书籍链接: %s => %d" % (link, code))
            if status:
                # download_file(link)
                add_to_urls(link)
                return
            tries += 1
            current_date = get_next_date(current_date)
    except Exception as e:
        # Best-effort crawler: log and move on to the next book.
        print(e)


def fetch_book_link(link):
    """Open a book detail page and process its 'Download PDF' anchor, if any."""
    page = session.get(link)
    for anchor in page.html.find(".entry-content a"):
        if anchor.text != "Download PDF":
            continue
        raw = anchor.attrs["href"]
        add_to_raw(raw)
        match = re.match(URL_DPR, raw)
        if match is None:
            continue
        p_date, p_book = match.groups()
        if p_date and p_book:
            handle_book_link(p_date, p_book)


def fetch_page_links(page):
    """Fetch listing page number *page* and crawl every book entry on it."""
    print("正在抓取第%d页" % page)
    response = session.get(URL_PAGE.format(page))
    for anchor in response.html.find(".entry-title a"):
        fetch_book_link(anchor.attrs["href"])


def fetch_pages():
    """Scrape the front page's pagination and return the highest page number.

    Returns 0 when no numbered pagination link is found.
    """
    page = 0
    response = session.get(URL)
    for link in response.html.find(".nav-links a"):
        # Pagination anchors may carry several classes ("page-numbers current",
        # "next page-numbers") or none at all; attrs.get + membership avoids
        # the KeyError/wrong-position bug of attrs["class"][0].
        classes = link.attrs.get("class", ())
        if "page-numbers" in classes:
            m = re.match(URL_MATCH, link.attrs["href"])
            if m is not None:
                page = max(page, int(m.group(1)))
    return page


def clawer():
    """Entry point: reset both output files, then crawl every listing page."""
    clear_urls()
    clear_raw()
    total_pages = fetch_pages()
    print("总页数: %d" % total_pages)
    # range(1, 1) is empty, so a zero page count crawls nothing.
    for current in range(1, total_pages + 1):
        fetch_page_links(current)


if __name__ == "__main__":
    # Run the crawler only when executed as a script, not when imported.
    clawer()
