import re
import csv
import time
import requests
from copy import deepcopy

# Build the list of all listing-page URLs (index page + numbered pages).


def geturl(x):
    """Return the index page URL followed by the first *x* paginated URLs.

    Args:
        x: number of paginated listing pages to include (pages 1..x).

    Returns:
        list[str]: ['.../xsdt/xshd.htm', '.../xsdt/xshd/1.htm', ..., '.../xsdt/xshd/x.htm']
    """
    # The landing page has a different path from the numbered pages.
    urls = ['http://www.isebks.qd.sdu.edu.cn/xsdt/xshd.htm']
    urls.extend(
        'http://www.isebks.qd.sdu.edu.cn/xsdt/xshd/{}.htm'.format(i)
        for i in range(1, x + 1)
    )
    return urls


def _parse_article(number):
    """Fetch article *number* and return its fields as a fresh dict.

    Extracted from the two copy-pasted branches of getall(); builds a NEW
    dict per article (the original mutated one shared dict and papered over
    it with deepcopy).

    Returns:
        dict with keys 'title', 'date', 'url', 'writer', 'shengao'.
        Missing date/writer become 'none'; 'shengao' is the raw findall
        list, or 'none' when nothing matched.
    """
    url = 'http://www.isebks.qd.sdu.edu.cn/info/1015/{}.htm'.format(number)
    resp = requests.get(url)
    resp.encoding = 'utf-8'
    html = resp.text

    # 标题 (title)
    title = re.findall('<div class="dbt">(.*?)</div>', html, re.S)
    title_text = re.sub(r'\r|\n|<a>|</a>', '', title[0]).strip()

    # 日期 (date)
    date = re.findall('<div class="gengxin">(.*?)</div>', html, re.S)
    date_text = re.sub(r'\r|\n|<a>|</a>|最后更新时间：', '', date[0]).strip()

    # 作者 (author)
    writer = re.findall('<div class="gengxin1">(.*?)</div>', html, re.S)
    writer_text = re.sub(r'\r|\n|<a>|</a>|作者：', '', writer[0]).strip()

    # 编辑 (editor) — BUG FIX: the original compared this list to '' which
    # is never true, so the 'none' fallback was dead code; test emptiness.
    editors = re.findall('编辑(.*?)<br>', html, re.S)

    return {
        'title': title_text,
        'date': date_text if date_text else 'none',
        'url': resp.url,  # final URL after any redirects
        'writer': writer_text if writer_text else 'none',
        'shengao': editors if editors else 'none',
    }


def getall(urls):
    """Scrape every article linked from the listing pages in *urls*.

    The index page gets special handling: its highest-numbered article is
    prepended first (解决不能爬取最新一篇的BUG — the "cannot scrape the
    newest article" workaround), without a 'shengao' field, matching the
    original output shape.

    Fixes vs. the original:
    - max() over the id strings was lexicographic ('999' > '1000'); now
      compared numerically with key=int.
    - one shared dict + deepcopy replaced by a fresh dict per article.
    - inconsistent 'null'/'none' fallbacks normalized to 'none'.
    - dead branches (shengao == '', item == '') removed; builtin `list`
      no longer shadowed.

    Returns:
        list[dict]: de-duplicated article records.
    """
    index_url = 'http://www.isebks.qd.sdu.edu.cn/xsdt/xshd.htm'
    articles = []
    for url in urls:
        page = requests.get(url)
        page.encoding = 'utf-8'
        # The index page and the paginated pages link with different
        # relative prefixes.
        if url == index_url:
            numbers = re.findall(
                r'<a href="../info/1015/(\d+).htm">', page.text)
            if numbers:
                # BUG FIX: pick the newest article by numeric id, not
                # string order.
                newest = _parse_article(max(numbers, key=int))
                # The original 'newest' record carried no shengao field.
                del newest['shengao']
                articles.append(newest)
        else:
            numbers = re.findall(
                r'<a href="../../info/1015/(\d+).htm">', page.text)
        for number in numbers:
            record = _parse_article(number)
            print(record)
            if record not in articles:  # de-duplicate across pages
                articles.append(record)
    return articles


# 写入文件
# 写入文件
def write(alllist, path='.\\info.csv'):
    """Write the scraped article records to a CSV file.

    Args:
        alllist: iterable of dicts with (a subset of) the keys
            title/url/date/writer/shengao; missing keys are written empty.
        path: output file path; defaults to the original hard-coded
            '.\\info.csv' so existing callers are unaffected.
    """
    fieldnames = ['title', 'url', 'date', 'writer', 'shengao']
    # newline='' is required by the csv module on Windows; explicit utf-8
    # avoids UnicodeEncodeError on platforms whose default codec cannot
    # represent the Chinese text in the records.
    with open(path, mode='w', newline='', encoding='utf-8') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(alllist)


if __name__ == '__main__':
    # Time the full scrape-and-export run.
    started = time.time()
    page_count = 26  # number of paginated listing pages to visit
    records = getall(geturl(page_count))
    write(records)
    elapsed = time.time() - started
    print('time cost', elapsed, 's')
