import csv
import datetime
import os
import random
import time

from log_out import LogOut
from time import sleep

import requests
import urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.exceptions import InsecureRequestWarning
from config import user_agent,base_url,url

urllib3.disable_warnings(InsecureRequestWarning)


class Spider(object):
    """HTTP fetcher for article pages and comment/listing pages.

    Network methods return the sentinel string '张忠全' on failure so
    callers can detect errors without handling exceptions themselves.
    """

    def __init__(self):
        super(Spider, self).__init__()
        # 'Connection: close' avoids keeping sockets open between requests.
        self.headers = {'User-Agent': user_agent,
                        'Connection': 'close'
                        }

    def get_title(self, url):
        """Fetch *url* and return the text of its '.article__title' element.

        Returns the sentinel '张忠全' when the request fails or the page
        has no title element.
        """
        print(1)
        try:
            req = requests.get(
                url,
                headers=self.headers,
                verify=False,
                timeout=(50, 80)
            ).text
        except Exception as e:
            LogOut.Error_log_out('in get_title\n\t' + str(e))
            return '张忠全'
        soup = BeautifulSoup(req, 'lxml')
        # Guard against pages lacking the element (previously an
        # unhandled IndexError on select(...)[0]).
        nodes = soup.select('.article__title')
        if not nodes:
            LogOut.Error_log_out('in get_title\n\tno .article__title found')
            return '张忠全'
        return nodes[0].string

    @staticmethod
    def time_trans(number):
        """Convert a unix timestamp to 'YYYY-MM-DD HH:MM:SS' (local time)."""
        time_local = time.localtime(number)
        return time.strftime("%Y-%m-%d %H:%M:%S", time_local)

    def get_url_comment(self, url):
        """Fetch *url* with retries and return it parsed as BeautifulSoup.

        Returns the sentinel '张忠全' when the request fails.
        """
        sess = requests.Session()
        # Retry transient connection failures up to 3 times.
        sess.mount('http://', HTTPAdapter(max_retries=3))
        sess.mount('https://', HTTPAdapter(max_retries=3))
        sess.keep_alive = False

        try:
            # Bug fix: the original called requests.get() directly, so the
            # retry adapters mounted on `sess` were never used and the
            # session was never closed.
            req = sess.get(
                url,
                headers=self.headers,
                verify=False,
                timeout=(50, 60)
            ).text
        except Exception as e:
            LogOut.Error_log_out('in get_url_comment:\n\t' + str(e))
            return '张忠全'
        finally:
            sess.close()
        return BeautifulSoup(req, 'lxml')


# Module-level singleton used by BeginSpider.get_and_save below.
spider = Spider()


class CsvPipeline(object):
    """Appends scraped items as rows to ./时间线/时间线.csv."""

    @staticmethod
    def process_item(item):
        """Append *item* (dict with id/title/datetime/url keys) to the CSV.

        Creates the output directory on demand and writes a header row
        the first time the file is created.  Returns *item* on success
        (falsy input is returned unchanged without writing); a malformed
        item is skipped and None is returned.
        """
        base_dir = './时间线'
        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)
        file_path = base_dir + os.sep + '时间线' + '.csv'
        # Header only needed when the file does not exist yet.
        is_first_write = not os.path.isfile(file_path)
        if item:
            with open(file_path, 'a', encoding='utf-8-sig', newline='') as f:
                writer = csv.writer(f)
                if is_first_write:
                    writer.writerow(['id', 'title', 'datetime', 'url'])
                try:
                    writer.writerow(
                        [item['id'], item['title'], item['datetime'], item['url']]
                    )
                except (KeyError, TypeError):
                    # Narrowed from a bare except: missing keys or a
                    # non-mapping item skip the row (caller continues).
                    return
        return item


# NOTE(review): rebinding the class name to an instance shadows the class.
# It works because process_item is a @staticmethod, but rename the variable
# if this ever grows instance state.
CsvPipeline = CsvPipeline()


class BeginSpider(object):
    """Drives the crawl: walks listing pages and persists each item."""

    @staticmethod
    def get_and_save(url):
        """Crawl up to 300 listing pages starting at *url*, saving items.

        Each page yields a batch of entries; the next page URL comes from
        the last entry's 'data-next' attribute and is also written to
        nexturl.txt so an interrupted run can resume from there.
        """
        itm = 0
        while itm < 300:
            print(itm)
            itm += 1
            # Randomized delay so we don't hammer the server.
            sleep(random.randint(2, 5))
            soup = spider.get_url_comment(url)
            if soup == '张忠全':  # sentinel: request failed, try again
                continue
            try:
                next_url = base_url + soup.select('.lenta__item')[-1].get('data-next')
            except (IndexError, TypeError):
                # Narrowed from a bare except: page had no items, or the
                # last item lacked data-next (get() returned None).
                continue
            result = soup.select('div .lenta__item-size')

            for entry in result:
                lurl = entry.get('href')
                # Article id = path tail without the '.html' suffix.
                # (local renamed from `id`, which shadowed the builtin)
                article_id = lurl[lurl[1:].index('/') + 2:].replace('.html', '')
                nurl = base_url + lurl
                # (local renamed from `time`, which shadowed the imported module)
                pub_time = Spider.time_trans(int(entry.select('.lenta__item-date')[0].get('data-unixtime')))
                title = entry.select('.lenta__item-text')[0].string
                # len(str(None)) == 4 — presumably this detects a missing
                # .string (nested markup) and re-fetches the real title;
                # it also matches genuine 4-char titles. TODO confirm.
                if len(str(title)) == 4:
                    status = spider.get_title(nurl)
                    if status == '张忠全':
                        continue
                    title = status
                item = {'id': article_id, 'url': nurl, 'datetime': pub_time, 'title': title}
                print(item)
                CsvPipeline.process_item(item)
            LogOut.process_log_output(next_url + '一批20，完毕')
            if next_url:
                url = next_url
                with open('nexturl.txt', 'w') as f:
                    f.write(next_url)
        LogOut.process_log_output(str(itm) + '完成')


# NOTE(review): rebinds the class name to an instance (same pattern as
# CsvPipeline above); fine while get_and_save is a @staticmethod.
BeginSpider = BeginSpider()

# Entry point: starts crawling from the configured start `url` at import
# time. NOTE(review): consider an `if __name__ == "__main__":` guard.
BeginSpider.get_and_save(url)
