# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import datetime
import csv
import os

import re


def killAnUnseen(s):
    """Remove one locale-unencodable character (all its occurrences) from *s*.

    Probes encodability by writing *s* to ``kill.html`` with the locale's
    preferred encoding (the default for ``open(..., 'w')``). If a character
    cannot be encoded, every occurrence of it is stripped.

    Returns a tuple ``(new_string, changed)`` where ``changed`` is True when
    a character was removed (meaning another pass is needed).
    """
    try:
        # 'with' guarantees the handle is closed even when write() raises;
        # the original leaked the file object on UnicodeEncodeError.
        with open('kill.html', 'w') as f:
            f.write(s)
    except UnicodeEncodeError as err:
        # Use the exception's structured attributes (err.object, err.start)
        # instead of regex-parsing str(err): when several consecutive
        # characters fail, the message reads "can't encode characters in
        # position i-j" with no escape sequence, so the old regex search
        # returned None and crashed on .group().
        return s.replace(err.object[err.start], ""), True
    return s, False


def killUnseen(s):
    """Repeatedly strip unencodable characters from *s* until none remain.

    Runs killAnUnseen to a fixpoint: each pass removes one offending
    character (all its occurrences); the loop stops once a pass reports
    no change.
    """
    changed = True
    while changed:
        s, changed = killAnUnseen(s)
    return s


class CrawlerPipeline(object):
    """Default no-op pipeline stage: passes every item through unchanged."""

    def process_item(self, item, spider):
        # Nothing to transform; hand the item to the next pipeline stage.
        return item


def beautfy_str(line):
    """Clean *line* for CSV output by stripping locale-unencodable characters.

    NOTE: the name keeps its original spelling (missing 'i') because callers
    in this file reference it as-is.
    """
    return killUnseen(line)


class FeedCsvPipeline(object):
    """Append each scraped item as one row to a per-spider, per-day CSV file.

    The output file is ``./data/<spider.name>_<YYYYMMDD>.csv``; a header row
    of the item's sorted keys is written when the file is first created.
    """

    def process_item(self, item, spider):
        # Date-stamp the file name: one CSV per spider per calendar day.
        updatetime = datetime.datetime.now().strftime('%Y%m%d')
        # os.path.join instead of hard-coded '\\' so the path also works on
        # POSIX systems (the original concatenation only worked on Windows).
        data_dir = os.path.join(os.getcwd(), 'data')
        os.makedirs(data_dir, exist_ok=True)  # original crashed if ./data was missing
        filename = os.path.join(data_dir, '%s_%s.csv' % (spider.name, updatetime))

        # Decide on the header before opening in append mode, then do all
        # writing through a single handle (the original opened the file
        # twice, racing between the existence check and the header write).
        write_header = not os.path.exists(filename)
        keys = sorted(item.keys())
        with open(filename, 'a', newline='', encoding='gbk') as csvfile:
            writer = csv.writer(csvfile)
            if write_header:
                writer.writerow(keys)
            # Columns in sorted-key order, cleaned of unencodable characters.
            writer.writerow([beautfy_str(item[key]) for key in keys])
        return item
