# This is a sample Python script.
import os
import shutil
import time

import requests
from lxml import html
from bs4 import BeautifulSoup
import re



def get_content(url, id):
    """Download the article at *url* and save it to disk under *id*.

    Writes three files named ``<id>.txt`` into the title / url / content
    directories: the page title, the url itself, and the text of every
    ``<p>`` tag that has a direct string child.

    Args:
        url: Full article URL to fetch.
        id:  Article id used as the output file name. (The name shadows
             the ``id`` builtin but is kept for backward compatibility.)
    """
    # Politeness: throttle requests so we don't hammer the server.
    time.sleep(0.3)
    # Output directories.
    # NOTE(review): "Documens" looks like a typo for "Documents", but
    # existing data may already live there — left unchanged on purpose.
    path_title = r'..\Documens\title'
    path_url = r"..\Documens\url"
    path_content = r"..\Documens\content"

    # Fetch and parse the page; 'replace' avoids decode crashes on bad bytes.
    response = requests.get(url)
    content = response.content.decode('utf-8', 'replace')
    html_content = BeautifulSoup(content, "lxml")

    # Title: guard against pages with no <title> tag at all — the original
    # code crashed with AttributeError on html_content.title being None.
    title_tag = html_content.title
    if title_tag is not None and title_tag.string:
        title = title_tag.string
    else:
        title = " "

    # Context managers guarantee the files are closed even if a write fails.
    with open(os.path.join(path_title, id + ".txt"), mode='w', encoding="utf-8") as fh:
        fh.write(title)
    with open(os.path.join(path_url, id + ".txt"), mode='w', encoding="utf-8") as fh:
        fh.write(url)
    # Content is opened in append mode so repeated crawls accumulate text
    # (preserves the original behavior).
    with open(os.path.join(path_content, id + ".txt"), mode='a', encoding="utf-8") as fh:
        for child in html_content.find_all("p"):
            # .string is None for <p> tags with nested markup; skip those.
            if child.string:
                fh.write(child.string)


def analyse_page(url, regex):
    """Scan one listing page and crawl every article it links to.

    Finds all ``<a class="title_o">`` anchors, keeps those whose href
    matches *regex*, and calls :func:`get_content` on each matching
    article with the captured numeric id.

    Args:
        url:   Listing-page URL to fetch.
        regex: Compiled pattern whose first group captures the article id,
               e.g. ``re.compile(r'/\\w+/\\w+/id-(\\d{5}).html')``.
    """
    # Fetch and parse the listing page.
    response = requests.get(url)
    content = response.content.decode('utf-8', 'replace')
    html_content = BeautifulSoup(content, "lxml")

    # Walk every candidate article link on this page.
    for child in html_content.find_all("a", {"class": "title_o"}):
        href = child.get("href")
        # Bug fix: <a> tags without an href yield None, and regex.match(None)
        # raises TypeError — skip them instead of crashing.
        if not href:
            continue
        match = regex.match(href)
        # Drop links that are not article detail pages.
        if not match:
            continue
        # href is site-relative; prepend the host to get the full URL.
        full_url = "http://www.hitsz.edu.cn" + href
        print(full_url)
        # First capture group is the 5-digit article id.
        article_id = match.group(1)
        print(article_id)
        # Descend into the article page and persist it.
        get_content(full_url, article_id)



def test():
    """Ad-hoc manual check: fetch one known article and print its text.

    Downloads a fixed article page and prints the direct string content
    of every ``<p>`` tag — useful for eyeballing what get_content would
    save. Not an automated test; relies on the live site.
    """
    response = requests.get("http://www.hitsz.edu.cn/article/view/id-98477.html")
    # 'replace' prevents UnicodeDecodeError on malformed bytes.
    content = response.content.decode('utf-8', 'replace')
    html_content = BeautifulSoup(content, "lxml")
    for child in html_content.find_all("p"):
        # .string is None for <p> tags containing nested markup.
        if child.string:
            print(child.string)



if __name__ == '__main__':
    # Bug fix: the guard compared __name__ against '__Crawler__', which is
    # never its value, so the crawler never ran when the script was executed.
    #
    # Article-detail links look like /article/view/id-98477.html; group 1
    # captures the 5-digit id. The dot before "html" is escaped so it only
    # matches a literal '.' (the original '.' matched any character).
    regex = re.compile(r'/\w+/\w+/id-(\d{5})\.html')
    # Five site sections, 20 listing pages each, 20 items per page.
    section_ids = (75, 116, 80, 74, 77)
    for page in range(20):
        offset = page * 20
        for section in section_ids:
            analyse_page(
                "http://www.hitsz.edu.cn/article/id-%d.html"
                "?maxPageItems=20&keywords=&pager.offset=%d" % (section, offset),
                regex,
            )

    # test()
