# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import pymongo
from datetime import datetime
import pytz
import csv
# import redis
import time
# from hdfs.client import Client
# import happybase
import os
from pyquery import PyQuery as pq

# Get the current time (Asia/Shanghai), used to build request URLs and DB names.
def get_time():
    """Return the current Shanghai time as a 3-element list [YYYY, MMDD, HH:MM].

    The '%Y-%m%d-%H:%M' format is intentional: splitting the result on '-'
    yields exactly [year, month+day, hour:minute], which the rest of this
    module indexes into (e.g. now_time[1][0:2] is the month).
    """
    # datetime.utcnow() is deprecated and returns a naive datetime;
    # build a timezone-aware UTC time directly instead.
    now = datetime.now(pytz.utc)
    now_new = now.astimezone(pytz.timezone('Asia/Shanghai'))
    now_new_str = now_new.strftime('%Y-%m%d-%H:%M')
    return now_new_str.split('-')

# Build the list of minute-granular timestamps covering the past hour.
def create_time_list(date, point):
    """Return 60 'M-D H:MM'-style strings for the hour ending at *point*.

    date:  'MMDD' string (e.g. '0115'); leading zeros are dropped -> '1-15'.
    point: current time-of-day as 'H:M' / 'HH:MM'.

    The result is minutes M..59 of the previous hour followed by minutes
    0..M-1 of the current hour.  NOTE(review): the hour is zero-padded only
    when the minute is below 10, and an hour of 0 produces '-1' for the
    previous hour — both quirks are kept as-is because these strings must
    match the datetime strings scraped from the site; confirm before changing.
    """
    day_part = f"{int(date[0:2])}-{int(date[2:])}"
    hour, minute = (int(piece) for piece in point.split(':'))

    def stamp(h, m):
        # Minutes below 10 get a leading zero, and only then does the
        # hour get one too (matching the original formatting exactly).
        if m < 10:
            padded_hour = f"0{h}" if h < 10 else f"{h}"
            return f"{day_part} {padded_hour}:0{m}"
        return f"{day_part} {h}:{m}"

    previous_hour = [stamp(hour - 1, m) for m in range(minute, 60)]
    current_hour = [stamp(hour, m) for m in range(minute)]
    return previous_hour + current_hour

def callback(filename, size):
    """Upload-progress hook: log each finished chunk; size == -1 means done."""
    finished = size == -1
    print(filename, "完成一个chunk上传", "当前大小", size)
    if finished:
        print("文件上传成功")

# Module-level snapshot of the current Shanghai time, shared by all pipelines:
# now_time == [YYYY, MMDD, HH:MM] (see get_time).
now_time = get_time()
# Minute-granular timestamps of the past hour; items whose 'datetime' field
# is not in this window are skipped by every pipeline below.
time_list = create_time_list(now_time[1], now_time[2])

class NewsMongoPipeline:
    """Scrapy pipeline that cleans news items and batch-inserts them into MongoDB."""

    def __init__(self):
        # Initialize the database client and target collections.
        self.myclient = pymongo.MongoClient("mongodb://mongodb:27017")
        # self.Cache = self.myclient["Cache"]
        self.Cache = self.myclient["Test"]
        # self.cache = self.Cache["cache"]
        self.cache = self.Cache["test"]
        # One database per year, one collection per month.
        self.mydb = self.myclient[f"{now_time[0]}"]
        self.mycol = self.mydb[f"{now_time[1][0:2]}"]
        # Pending documents, flushed to Mongo in batches of 100.
        self.data = []

    def process_item(self, item, spider):
        """Clean and buffer one item; flush every 100 documents.

        Returns the item unchanged so downstream pipelines keep running.
        """
        # Only keep items whose timestamp falls inside the last-hour window.
        if item.get("datetime", "") in time_list:
            title = item.get("title", "")
            _type = item.get("type", "")
            if _type != "图片":
                url = f"https://www.chinanews.com.cn/{item.get('url','')}"
            else:
                url = item.get("url", "")
            # Renamed from `datetime`: that name shadowed the datetime class
            # imported at module level.
            item_dt = item.get("datetime", "")

            html = item.get('html', "")
            doc = pq(html)
            doc = pq(doc('.content_maincontent_more'))
            # Strip share buttons, font-size widgets and channel links.
            doc.find('.right share3').remove()
            doc.find('.bigger_font_size').remove()
            doc.find('.small_font_size').remove()
            doc.find('.channel').remove()
            # Neutralize all outbound links in the archived copy.
            doc('a').attr('href', '#')
            for img in doc('img').items():
                src = img.attr('src')
                # Guard: an <img> without src would crash the concatenation
                # with a TypeError and drop the whole item.
                if src:
                    img.attr('src', 'https:' + src)

            html = f'<link rel="stylesheet" href="css/test.css"><div class="content_maincontent_more">{doc.html()}</div>'

            self.data.append({
                "title": title,
                "type": _type,
                "url": url,
                "time": item_dt.split(' ')[1],
                "date": item_dt.split(' ')[0],
                "month": now_time[1][0:2],
                "year": now_time[0],
                "html": html
            })

        # Flush a full batch of 100 documents to both collections.
        if len(self.data) >= 100:
            self.mycol.insert_many(self.data)
            self.cache.insert_many(self.data)
            self.data = []

        return item

    def close_spider(self, spider):
        """Flush the remaining (<100) buffered documents and close the client."""
        if len(self.data) > 0:
            self.mycol.insert_many(self.data)
            self.cache.insert_many(self.data)
        self.myclient.close()

# class NewsHdfsPipeline:
#     def __init__(self):
#         self.fname = f"news_{now_time[0]}{now_time[1]}{now_time[2].split(':')[0]}{now_time[2].split(':')[1]}.csv"
#         self.f = open(self.fname, mode="a", encoding='utf-8')
#         self.writer = csv.writer(self.f)
#         self.writer.writerow(("title", "type", "url", "time", "date", "year"))
#         self.data = []
#         self.client = Client("http://master:9870/")
    
#     def close_spider(self, spider):
#         if len(self.data) > 0:
#             self.writer.writerows(self.data)
#         self.f.close()
#         self.client.upload(local_path=self.fname, hdfs_path="/user/caixinpeng/news", progress=callback, cleanup=True)
#         os.remove(self.fname)

#     def process_item(self, item, spider):
#         if item.get("datetime", "") in time_list:
#             title = item.get("title", "")
#             _type = item.get("type", "")
#             url = item.get("url", "")
#             datetime = item.get("datetime", "")
#             self.data.append((title, _type, 
#                               url, datetime.split(' ')[1], 
#                               datetime.split(' ')[0],
#                               now_time[0]))
            
#         if len(self.data) == 100:
#             self.writer.writerows(self.data)
#             self.data = []
        
#         return item

# class NewsRedisPipeline:
#     def __init__(self):
#         self.db = redis.Redis(host='redis', port=6379, db=1, password='020709', decode_responses=True)
#         self.data = {}
#         self.content = 0

#     def process_item(self, item, spider):
#         if item.get("datetime", "") in time_list:
#             title = item.get("title", "")
#             _type = item.get("type", "")
#             url = item.get("url", "")
#             datetime = item.get("datetime", "")
#             self.data[f"{self.content}"] = str({
#                 "title": title,
#                 "type": _type,
#                 "url": url,
#                 "date": datetime.split(' ')[0],
#                 "time": datetime.split(' ')[1],
#                 "year": now_time[0]
#             })
#             self.content += 1

#         return item
    
#     def close_spider(self, spider):
#         self.db.hmset(f"news_{now_time[0]}{now_time[1]}{now_time[2].split(':')[0]}{now_time[2].split(':')[1]}", self.data)

# class NewsHBasePipeline:
#     def __init__(self):
#         self.connection = happybase.Connection("master", 9090)
#         self.table_name = f"news_{now_time[0]}{now_time[1]}{now_time[2].split(':')[0]}{now_time[2].split(':')[1]}"
#         self.connection.create_table(
#             self.table_name,
#             {
#                 "info": dict()
#             }
#         )
#         if not self.connection.is_table_enabled(self.table_name):
#             self.connection.enable_table(self.table_name)
#         self.table = self.connection.table(self.table_name)

#     def process_item(self, item, spider):
#         if item.get("datetime", "") in time_list:
#             title = item.get("title", "")
#             _type = item.get("type", "")
#             url = item.get("url", "")
#             datetime = item.get("datetime", "")
#             self.table.put(title.encode('utf-8'), {
#                 "info:title": title,
#                 "info:type": _type,
#                 "info:url": url,
#                 "info:time": datetime.split(' ')[1],
#                 "info:date": datetime.split(' ')[0],
#                 "info:year": now_time[0]
#             })
        
#         return item 
        

