import csv
import datetime
import os

# time formatting: done
# connection-timeout blocking: done...
# TODO: logging method not written yet
# TODO: module cleanup / refactoring
# TODO: config parameter-pool setup
# TODO: dynamic crawling, read/write
# TODO: git
# NOTE: monthly traffic quota nearly exhausted...
import random

from log_out import LogOut
from time import sleep

import requests
import urllib3
from bs4 import BeautifulSoup
from requests.adapters import HTTPAdapter
from urllib3.exceptions import InsecureRequestWarning
from config import user_agent,get_detail_continue_id,get_detail_scv

urllib3.disable_warnings(InsecureRequestWarning)


class get_detail_info(object):
    """Fetch a news detail page and extract its article fields.

    NOTE(review): lowercase class name is kept (and is rebound to a
    module-level singleton below) for backward compatibility.
    """

    def __init__(self):
        # Bug fix: the original created a throwaway local Session with retry
        # adapters but then called requests.get directly, so max_retries never
        # applied.  Keep the session on self and use it in get_page.
        self.session = requests.Session()
        self.session.mount('http://', HTTPAdapter(max_retries=5))
        self.session.mount('https://', HTTPAdapter(max_retries=5))
        self.session.keep_alive = False
        self.headers = {'User-Agent': user_agent,
                        'Connection': 'close'
                        }

    def get_page(self, url, index):
        """Download *url* and return a parsed BeautifulSoup tree, or 0 on failure.

        *index* is unused here but kept for interface compatibility with callers.
        """
        LogOut.get_detail_news_progress('getting page {}'.format(url))
        try:
            req = self.session.get(
                url,
                headers=self.headers,
                verify=False,            # site cert issues tolerated on purpose
                timeout=(50, 80)         # (connect, read) timeouts, seconds
            ).text
        except Exception as e:           # broad by design: any failure -> 0 sentinel
            LogOut.Error_get_detail_news('in get_page:\n\t' + str(e))
            return 0
        return BeautifulSoup(req, 'lxml')

    @staticmethod
    def date_replace(datestr):
        """Parse a site timestamp such as '2021-03-01T12:30+0800' into datetime."""
        return datetime.datetime.strptime(datestr, "%Y-%m-%dT%H:%M+0800")

    def analysis_data(self, soup, index):
        """Extract article fields from a parsed detail page.

        Returns a dict with keys: id, title, summary, article, datacreated,
        keywords, imgsurl, imgsrc.  Each field falls back to ' '/''/[] when
        missing — extraction is deliberately best-effort per field.
        """
        try:
            wrapper = soup.select('[class = wrapper]')
        except Exception:
            wrapper = []
        try:
            keywords = wrapper[0].select('[class=endless__item]')[0].get('data-analytics-tags').replace(', ', ' ')
        except Exception:
            keywords = ' '
        try:
            datacreated = self.date_replace(wrapper[0].select('[itemprop=dateCreated]')[0].string)
        except Exception:
            datacreated = ' '
        try:
            title = wrapper[0].select('[itemprop=headline]')[0].string
        except Exception:
            title = ' '
        try:
            summary = wrapper[0].select('[class=article__announce-text]')[0].string
        except Exception:
            summary = ' '
        try:
            articles = wrapper[0].select('[class=article__block]')
        except Exception:
            articles = []
        try:
            imgs = wrapper[0].select('[class=media]')[0].img.get('src')
        except Exception:
            imgs = ' '
        # Collect body text, skipping nested embedded-article blocks.
        # ''.join avoids the original quadratic `article += n` pattern.
        parts = []
        for block in articles:
            if block.select('[data-type=article]'):
                continue
            parts.extend(block.strings)
        # Removes ALL spaces, as the original did — presumably the target
        # site's text does not rely on ASCII spaces; TODO confirm.
        article = ''.join(parts).replace(' ', '')
        try:
            imgname = imgs.split('/')[-1].split('_')[-1]
        except Exception:
            imgname = ''

        imgsrc = './imgs/' + index + '/' + imgname

        return {'id': index, 'title': title, 'summary': summary, 'article': article,
                'datacreated': datacreated, 'keywords': keywords, 'imgsurl': imgs, 'imgsrc': imgsrc}


get_detail_info = get_detail_info()


class ReadAndSave(object):
    """Read article URLs from a CSV, scrape each page, and persist the results
    (article rows to ./articles/article.csv, images under ./articles/imgs/)."""

    def __init__(self):
        # Bug fix: the original built a Session with retry adapters but never
        # used it (requests.get was called directly in download_pic).  Keep it
        # on self so max_retries actually applies.
        self.session = requests.Session()
        self.session.mount('http://', HTTPAdapter(max_retries=5))
        self.session.mount('https://', HTTPAdapter(max_retries=5))
        self.session.keep_alive = False
        self.headers = {'User-Agent': user_agent,
                        'Connection': 'close'
                        }

    def read_url_csv(self, file_name):
        """Iterate the URL CSV, scraping each row's page and saving the result.

        If ``get_detail_continue_id`` is set, rows are skipped until that id is
        reached (resume support).  Every 30 processed rows the loop sleeps
        2-5 s; before each fetch the current id is checkpointed to
        'get_detail_now_id.txt'.  Stops on the first network failure.
        """
        started = 0 if get_detail_continue_id != '' else 1
        count = 0
        epoch = 1
        with open(file_name, encoding='utf-8-sig') as fd:
            for row in csv.DictReader(fd):
                if get_detail_continue_id != '' and row['id'] == get_detail_continue_id:
                    started = 1
                if started != 1:
                    continue
                count += 1
                if count == 30:
                    count = 0
                    LogOut.get_detail_news_progress('-----------Epoch{},一批30，完毕-------------'.format(epoch))
                    epoch += 1
                    sleep(random.randint(2, 5))
                if row['id']:
                    try:
                        with open('get_detail_now_id.txt', 'w') as f:
                            f.write(row['id'])
                    except Exception as e:
                        # Bug fix: original concatenated str + Exception object,
                        # which itself raised TypeError inside the handler.
                        LogOut.Error_get_detail_news('in read_url_csv()' + '写入id断点失败' + str(e))
                        continue
                soup = get_detail_info.get_page(row['url'], row['id'])
                if soup == 0:  # get_page returns the 0 sentinel on any fetch error
                    LogOut.Error_get_detail_news('网络错误')
                    break
                items = get_detail_info.analysis_data(soup, row['id'])
                self.page_item(items)
        LogOut.get_detail_news_progress('执行完毕，进度已保存')

    def download_pic(self, pic_url, down_dir, pic_name):
        """Download *pic_url* into down_dir/pic_name.  Returns 1 on success, 0 on failure."""
        if not os.path.isdir(down_dir):
            os.makedirs(down_dir)
        try:
            pic = self.session.get(pic_url,
                                   headers=self.headers,
                                   verify=False,
                                   timeout=(50, 80)).content
        except Exception as e:  # broad by design: any failure -> 0 sentinel
            LogOut.Error_get_detail_news('in download_pic:\n\t' + str(e))
            return 0
        try:
            with open(down_dir + '/' + pic_name, 'wb+') as f:
                f.write(pic)
        except Exception as er:
            LogOut.Error_get_detail_news('in download_pic/存图片:\n\t' + str(er))
            return 0
        return 1

    def page_item(self, item):
        """Append *item* to ./articles/article.csv (writing the header on first
        creation) and download its image.  Returns *item* unchanged."""
        base_dir = './articles'
        if not os.path.isdir(base_dir):
            os.makedirs(base_dir)
        file_path = base_dir + os.sep + 'article' + '.csv'
        # Header is written only when the CSV does not exist yet.
        is_first_write = 0 if os.path.isfile(file_path) else 1
        if item:
            with open(file_path, 'a', encoding='utf-8-sig', newline='') as f:
                writer = csv.writer(f)
                if is_first_write:
                    writer.writerow([
                        'id', 'title', 'datacreated', 'keywords', 'summary', 'article',
                        'imgsurl', 'imgsrc'
                    ])
                try:
                    writer.writerow(
                        [item['id'], item['title'], item['datacreated'], item['keywords'],
                         item['summary'], item['article'], item['imgsurl'], item['imgsrc']
                         ]
                    )
                except Exception as e:
                    LogOut.Error_get_detail_news('in pageitem():\n\t' + str(e))
                    return
                # imgsrc looks like './imgs/<id>/<name>' -> ['.', 'imgs', id, name]
                rdir = item['imgsrc'].split('/')
                imgdir = base_dir + '/' + rdir[1] + '/' + rdir[2]
                pic_name = rdir[3]
                if self.download_pic(item['imgsurl'], imgdir, pic_name):
                    LogOut.get_detail_news_progress('下载图片：' + pic_name + '\t成功\n--')

        return item


# NOTE(review): rebinds the class name to a singleton and starts the crawl at
# import time; consider an `if __name__ == "__main__":` guard so importing this
# module does not trigger network I/O.  Kept as-is to preserve script behavior.
ReadAndSave = ReadAndSave()
# get_detail_info.read_url_csv('./时间线/时间线副本.csv')
ReadAndSave.read_url_csv(get_detail_scv)
