# -*- coding: utf-8 -*-
import datetime
import io
import logging
import os.path
import pprint
import smtplib
import socket
from email.mime.text import MIMEText

import pika
from scrapy.exceptions import DropItem
from scrapy.exporters import JsonLinesItemExporter
from util.AmqBlockingPublisher import AmqBlockingPublisher
from util.CustomConfigParser import CustomConfigParser
from util.DatabaseManager import DatabaseManager


class IndexPagePipeline(object):
    """Pipeline that stores URLs extracted from an index page in the queue database."""

    def __init__(self):
        # created lazily in open_spider because the spider's sourceId is needed
        self.dbManager = None

    def open_spider(self, spider):
        self.dbManager = DatabaseManager(spider.sourceId)

    def close_spider(self, spider):
        self.dbManager.close()

    def process_item(self, item, spider):
        """
        Process urls extracted from index page.
        We maybe also need to input string topic_id here
        """
        # NOTE(review): an http/https scheme filter was commented out here before;
        # currently every extracted url is queued unfiltered.
        urls = list(item['urls'])
        self.dbManager.addToQueueDatabase(urls)
        # Return the item so downstream pipelines still receive it — Scrapy
        # treats a None return as the item being dropped for later pipelines.
        return item


def rebox_single_item_into_list(item, key):
    """Wrap a bare string stored under *key* in a one-element list.

    Keeps item fields uniformly list-valued; missing keys and
    non-string values are left untouched.
    """
    value = item.get(key)
    if isinstance(value, str):
        item[key] = [value]


class ScraperPipeline(object):
    """Pipeline that publishes scraped articles over RabbitMQ and tracks scrape health.

    Each item is serialized as JSON-lines into an in-memory buffer and sent via
    an AMQ publisher; the source databases are updated afterwards. On spider
    close, a per-source "config sanity" file under ./data/ records the last
    successful scrape (or first attempt), and a warning email is sent when a
    source has not scraped successfully for a configurable number of hours.
    """

    def __init__(self):
        # timestamp of the most recently processed item; None if nothing scraped yet
        self.last_successful_scrape = None
        self.dbManager = None
        # buffer the exporter serializes into; drained after every publish
        self.stream = io.BytesIO()
        self.exporter = JsonLinesItemExporter(self.stream)
        self.producer = AmqBlockingPublisher()  # 'rabbitmq.cfg'
        self.scrape_statistic = {'urls_scraped': 0, 'missing_xpaths': set()}

    def open_spider(self, spider):
        self.dbManager = spider.dbManager
        self.producer.connect()
        self.exporter.start_exporting()

    def process_item(self, item, spider):
        """Publish one item and update the databases; drop it on broker failure."""
        self.last_successful_scrape = datetime.datetime.now()

        try:
            self.send_item(item)
            self.scrape_statistic['urls_scraped'] += 1
            self.update_url(item['_url_unique'])
            return item
        except (pika.exceptions.ConnectionClosed, pika.exceptions.ChannelClosed, socket.gaierror):
            # if rabbitmq connection is not open shutdown spider and don't update databases
            logging.error('Faulty Amq Connection. Closing Spider')
            spider.close_down = True
            raise DropItem

    def send_item(self, item):
        """Sends scraped article via rabbitmq."""
        self.exporter.export_item(item)
        message = self.stream.getvalue()
        # clear stream so messages will only be sent once
        self.stream.truncate(0)
        self.stream.seek(0)
        self.producer.publish_message(message)

    def update_url(self, url):
        """Updates send article in databases."""
        self.dbManager.addToScrapedDatabase(url)
        self.dbManager.clearFromQueueDatabase(url)

    def close_spider(self, spider):
        """Release resources, log statistics, and run the config sanity check."""
        self.dbManager.clearQueueDatabase()
        self.dbManager.close()
        self.producer.close()
        self.exporter.finish_exporting()
        # remove missing xpaths field if there are none
        if not self.scrape_statistic['missing_xpaths']:
            del self.scrape_statistic['missing_xpaths']

        # This statistics will be logged by the flask app
        pprint.pprint(self.scrape_statistic)
        self.check_configs(spider.sourceId)

    def check_configs(self, sourceId):
        """Update the per-source config sanity file and warn about stale configs.

        Dispatches on the run's outcome: record a successful scrape, inspect an
        existing sanity file for staleness, or record a first (unsuccessful)
        attempt.
        """
        path = "./data/" + sourceId + "_config_sanity.txt"
        parser = CustomConfigParser()
        settings = parser.read_config("email.cfg")["email"]

        now = datetime.datetime.now()
        if self.last_successful_scrape is not None:
            self._write_success_file(path)
        elif os.path.isfile(path):
            self._check_stale_config(path, sourceId, settings, now)
        else:
            self._write_first_attempt_file(path, now)

    def _write_success_file(self, path):
        """Record the timestamp of the last successful scrape plus missing xpaths."""
        # "with" guarantees the handle is closed even if a write fails
        with open(path, "w+") as file:
            file.write("Last successful scrape performed at: \n" + str(self.last_successful_scrape))
            if 'missing_xpaths' in self.scrape_statistic:
                file.write("\nMissing Xpaths: " + str(self.scrape_statistic['missing_xpaths']))
            else:
                file.write("\nNo missing Xpaths")

    def _check_stale_config(self, path, sourceId, settings, now):
        """Warn (and possibly email) when the last successful scrape is too old."""
        with open(path, "r") as file:
            file_content = file.readlines()

        # There are only 3 expected states an existing config sanity file can be in:
        # (1) First (unsuccessful) scrape attempted
        # (2) Last successful scrape
        # (3) An email has been sent because either (1) or (2) did not get updated for the set amount of time

        # The first two have 3, the last one 5 lines. Any other format indicates some form of corruption

        if len(file_content) not in {3, 5}:
            logging.warning("Config sanity file for " + sourceId + " appears to be corrupted and will be reset")
            os.remove(path)
            return

        # 5 lines means a mail timestamp was appended by a previous warning
        mail_sent = len(file_content) == 5
        last_mail = now
        last_scrape = datetime.datetime.strptime(file_content[1].strip(), '%Y-%m-%d %H:%M:%S.%f')
        if mail_sent:
            last_mail = datetime.datetime.strptime(file_content[4].strip(), '%Y-%m-%d %H:%M:%S.%f')

        try:
            wait_time = int(settings["hours_till_warning"])
        except ValueError:
            logging.warning("Value for \"hours_till_warning\" in email.cfg appears to be invalid."
                            " Use default of 24h instead")
            wait_time = 24

        if (now - last_scrape).total_seconds() > (wait_time * 3600):  # hours to seconds
            warning = ("Last successful scrape for \"%s\" was performed more than %s hours ago(%s) Consider checking if the config file is still up to date") % \
                      (sourceId, wait_time, last_scrape)
            logging.warning(warning)
            # only one mail for same config every 24h
            # (86400 seconds = 24hours)
            if not mail_sent or (now - last_mail).total_seconds() > 86400:
                # drop the previous mail-timestamp lines (last two) before rewriting
                kept = file_content[:-2] if mail_sent else file_content
                file = open(path, "w+")
                file.write("".join(kept).strip())
                # send_mail_warning appends the new mail timestamp and closes the file
                self.send_mail_warning(warning, settings, file)

    @staticmethod
    def _write_first_attempt_file(path, now):
        """Record that a first scrape was attempted but nothing was scraped yet."""
        with open(path, "w+") as file:
            file.write("First scrape attempted at: \n" + str(now))
            file.write("\nMissing Xpaths: NA")

    @staticmethod
    def send_mail_warning(string, settings, file):
        """Email *string* using the smtp settings; on success append the mail
        timestamp to *file*. The file handle is always closed."""
        try:
            msg = MIMEText(string)
            msg["From"] = settings["from"]
            msg["To"] = settings["to"]
            msg["Subject"] = "Web-crawler config warning"
            server = smtplib.SMTP(settings["smtp_server"], settings["port"])
            if settings["authentication_method"] == "STARTTLS":
                server.starttls()

            if settings["username"] and settings["password"]:
                server.login(settings["username"], settings["password"])

            server.sendmail(settings["from"], settings["to"], msg.as_string())
            server.quit()  # close the SMTP connection instead of leaking it
            file.write(
                "\nLast email warning for this config has been sent at:\n" + str(datetime.datetime.now()))
        except Exception as ex:
            logging.warning("An exception occurred. Could not send email warning.")
            logging.error(ex)
        finally:
            file.close()


