import re
import csv
from copy import deepcopy

import requests
import pymysql


class Get:
    """Scraper for the SDU (Qingdao) academic-activity announcement pages.

    Workflow: collect article URLs from the listing pages, fetch each
    article to extract title / author / last-update date, zip the parallel
    field lists into row dicts, and write them out as CSV.
    """

    def __init__(self):
        # Accumulators shared across method calls.
        self.urls = []      # listing-page URLs
        self.eachUrl = []   # individual article URLs
        self.lists = []     # unused; kept for backward compatibility
        self.cheek = []     # scratch list (no longer needed for de-dup)
        self.numbers = []   # article id numbers parsed from listing pages
        self.titles = []    # cleaned article titles
        self.date = []      # cleaned last-update dates ('Null' if missing)
        self.writer = []    # cleaned author names ('Null' if missing)

    def get_urls(self, page_number):
        """Collect article URLs from the index page plus *page_number* listing pages.

        Parses the article id out of each page's anchor tags, de-duplicates
        the ids (preserving first-seen order), builds the canonical article
        URL for each id, and returns the list (also stored on ``self.eachUrl``).
        """
        index_url = 'http://www.isebks.qd.sdu.edu.cn/xsdt/xshd.htm'
        self.urls.append(index_url)
        for page in range(1, page_number + 1):
            self.urls.append(
                'http://www.isebks.qd.sdu.edu.cn/xsdt/xshd/{}.htm'.format(page))
        for url in self.urls:
            html = requests.get(url)
            html.encoding = 'utf-8'
            # The index page links with one '../'; paginated pages sit one
            # level deeper and use '../../'.
            if url == index_url:
                pattern = r'<a href="../info/1015/(\d+).htm">'
            else:
                pattern = r'<a href="../../info/1015/(\d+).htm">'
            self.numbers.extend(re.findall(pattern, html.text))
        # De-duplicate while preserving first-seen order (replaces the
        # original three-loop + deepcopy dance through self.cheek).
        self.numbers = list(dict.fromkeys(self.numbers))
        for num in self.numbers:
            article_url = 'http://www.isebks.qd.sdu.edu.cn/info/1015/{}.htm'.format(
                num)
            self.eachUrl.append(article_url)
            print(article_url)
        return self.eachUrl

    def get_titles(self, url):
        """Fetch every article in *url* (a list of URLs) and collect its title.

        Strips CR/LF and bare anchor tags from the raw <div class="dbt">
        contents. Returns ``self.titles``.
        """
        for page_url in url:
            resp = requests.get(page_url)
            resp.encoding = 'utf-8'
            title = re.findall('<div class="dbt">(.*?)</div>', resp.text, re.S)
            wash_titles = re.sub(r'\r|\n|<a>|</a>', '', title[0]).strip()
            self.titles.append(wash_titles)
            print(wash_titles)
        return self.titles

    # Fetch the last-update date of each article.
    def get_date(self, url):
        """Collect the cleaned last-update date for every article in *url*.

        Appends the literal string 'Null' when the page carries no date.
        Returns ``self.date``.
        """
        for page_url in url:
            resp = requests.get(page_url)
            resp.encoding = 'utf-8'
            date = re.findall(
                '<div class="gengxin">(.*?)</div>',
                resp.text,
                re.S)
            # '最后更新时间：' is the page's "last updated:" label text.
            wash_date = re.sub(r'\r|\n|<a>|</a>|最后更新时间：', '', date[0]).strip()
            value = wash_date if wash_date else 'Null'
            self.date.append(value)
            print(value)
        return self.date

    # Fetch the author of each article.
    def get_writer(self, url):
        """Collect the cleaned author name for every article in *url*.

        Appends the literal string 'Null' when the page carries no author.
        Returns ``self.writer``.
        """
        for page_url in url:
            resp = requests.get(page_url)
            resp.encoding = 'utf-8'
            writer = re.findall(
                '<div class="gengxin1">(.*?)</div>', resp.text, re.S)
            # '作者：' is the page's "author:" label text.
            wash_writer = re.sub(r'\r|\n|<a>|</a>|作者：', '', writer[0]).strip()
            value = wash_writer if wash_writer else 'Null'
            self.writer.append(value)
            print(value)
        return self.writer

    # Bundle the separately-fetched field lists into row dicts.
    @staticmethod
    def zipped(title, writer, date, url):
        """Combine the parallel field lists into a list of per-article dicts.

        Bug fix: the original reused one dict object and deep-copied the
        whole accumulator every iteration (O(n^2), only accidentally
        correct) — now a fresh dict is built per row.
        """
        alllist = []
        for t, w, d, u in zip(title, writer, date, url):
            row = {'title': t, 'writer': w, 'date': d, 'url': u}
            alllist.append(row)
            print(row)
        return alllist

    # Persist rows to disk.
    @staticmethod
    def write(alllist):
        """Write the scraped rows to ``information.csv`` in the current dir.

        ``newline=''`` is required by the csv module (prevents blank rows on
        Windows); explicit utf-8 keeps the Chinese text intact regardless of
        the platform's default encoding.
        """
        with open('information.csv', mode='w', newline='', encoding='utf-8') as f:
            csv_writer = csv.DictWriter(
                f, fieldnames=['title', 'url', 'date', 'writer'])
            csv_writer.writeheader()
            csv_writer.writerows(alllist)


if __name__ == '__main__':
    # page_count = int(input('请输入总页数:'))  # optional: prompt for page count
    get = Get()
    urls = get.get_urls(25)
    dates = get.get_date(urls)
    write = get.get_writer(urls)
    titles = get.get_titles(urls)
    zipped = get.zipped(titles, write, dates, urls)
    get.write(zipped)
    # NOTE(review): credentials are hard-coded; move them to env vars/config.
    db = pymysql.connect(
        host="localhost",
        user="root",
        password="xj20010126",
        db="student",
        port=3306)
    try:
        with db.cursor() as cur:
            # Parameterized query: scraped titles may contain quotes, which
            # would break (or inject into) a string-formatted statement.
            sql_insert = "insert into 爬虫(title,date,url,writer) values(%s,%s,%s,%s)"
            for dic in zipped:
                cur.execute(
                    sql_insert,
                    (dic['title'], dic['date'], dic['url'], dic['writer']))
        # One commit after the loop: the whole batch lands atomically
        # instead of leaving partial rows on a mid-loop failure.
        db.commit()
    finally:
        # Always release the connection, even if an insert raises.
        db.close()
