import time

import pymongo
import scrapy
from scrapy import Request
import sqlite3
import re


class T66yList(scrapy.Spider):
    """Spider for the t66y forum: walks the fid=25 list pages, then follows
    each post to collect its title/author/date, per-file metadata (name,
    format, size, duration), image URLs and external download links.

    Results are yielded as plain dicts. A MongoDB collection (t66y.article)
    is queried for already-seen posts, and a local sqlite DB connection is
    kept open for the (currently disabled) url bookkeeping table.
    """

    name = 't66ylist'
    allowed_domains = ['t66y.com', 'filefab.com']

    # All requests go through a local HTTP proxy.
    PROXY = 'http://127.0.0.1:58591'
    BASE_URL = 'http://t66y.com/'
    LIST_URL = 'http://t66y.com/thread0806.php?fid=25&search=&page='
    # Forum announcement/admin authors whose posts are skipped.
    SKIP_AUTHORS = ('vonder', 'administrator')
    # Post dates on the list page look like '2018-01-31'.
    DATE_RE = re.compile(r'\d{4}-\d{2}-\d{2}')

    def __init__(self, name=None, **kwargs):
        # BUG FIX: the original passed name=None to super(), silently
        # discarding a caller-supplied spider name.
        super().__init__(name=name, **kwargs)
        # NOTE(review): neither connection is ever closed; scrapy's
        # closed() hook would be the place — confirm before adding.
        self.conn = sqlite3.connect('t66y.db')
        self.c = self.conn.cursor()
        self.mongo_client = pymongo.MongoClient(host='127.0.0.1', port=27017)
        self.collection = self.mongo_client.t66y.article

    def start_requests(self):
        """Yield one proxied request per list page (pages 1..9)."""
        for page in range(1, 10):
            url = self.LIST_URL + str(page)
            print(url)
            yield Request(url, callback=self.parse, dont_filter=True,
                          meta={'proxy': self.PROXY})

    def _extract_date(self, post):
        """Return the post date as 'YYYY-MM-DD', trying several layouts.

        The list page renders the date either as span text or inside a
        span's title attribute; the epoch date is returned when nothing
        matches.
        """
        candidates = (
            "td/div[@class='f12']/span/text()",
            "td/div[@class='f12']/span/@title",
            "td/div[@class='f12']/span[@class='s3']/@title",
        )
        for xp in candidates:
            value = post.xpath(xp).extract_first()
            # BUG FIX: the original called re.search on a possibly-None
            # extract_first() result, raising TypeError when the span
            # (or its title attribute) was absent.
            if value:
                match = self.DATE_RE.search(value)
                if match:
                    return match.group()
        return "1970-01-01"

    def parse(self, response):
        """Parse a list page and yield one detail-page request per post.

        Each request carries a partially-filled item dict in its meta;
        parse_detail completes and yields it.
        """
        posts = response.xpath("//tr[@class='tr3 t_one tac']")
        for row_index, post in enumerate(posts):
            title = post.xpath("td[@class='tal']//a/text()").extract_first()
            author = post.xpath("td/a[@class='bl']/text()").extract_first()
            url = post.xpath("td[@class='tal']//a/@href").extract_first()
            # Skip admin posts and rows missing a link or title (this also
            # makes any later url/title re-check unnecessary).
            if author in self.SKIP_AUTHORS or not url or not title:
                continue

            post_time = self._extract_date(post)

            # The numeric post id is the last path component of the url
            # (e.g. 'htm_data/25/1801/1234567.html' -> 1234567); keep the
            # original fallback to the row index on malformed urls, but
            # make it explicit instead of silently reusing a shadowed
            # loop variable.
            post_id = row_index
            try:
                post_id = int(url[url.rindex("/") + 1:url.rindex('.')])
            except ValueError as e:
                print(e)
                print(url)

            print({
                "title": title,
                "url": url,
                "author": author,
                "time": post_time
            })

            # NOTE(review): the duplicate check is currently disabled —
            # find_one is executed but its result is ignored, so every
            # post is re-crawled.
            mongo_item = self.collection.find_one({"url": url})
            print('not find mongo_item')
            # BUG FIX: the original rebuilt the date here with
            # re.search(..., post_time).group(), which raised
            # AttributeError whenever the date came from one of the
            # fallback xpaths; reuse the already-resolved date instead.
            yield Request(self.BASE_URL + url, callback=self.parse_detail,
                          dont_filter=True,
                          meta={'proxy': self.PROXY,
                                'item': {"title": title,
                                         "author": author,
                                         "time": post_time,
                                         "url": url,
                                         "index": post_id}})

    def parse_detail(self, response):
        """Parse a post page: collect file metadata, images and links.

        Posts listing several files get one details entry per '名稱/名称'
        (name) line; single-file posts get exactly one entry built from
        label lookups.
        """
        details = response.xpath(
            "//div[@class='tpc_content do_not_catch']//text()"
            "|//div[@class='tpc_content do_not_catch']//img/@ess-data")
        named = [node for node in details if "名称" in node.extract()]
        item = response.meta['item']
        item['details'] = []
        item['links'] = []

        if len(named) > 1:
            # Multi-file post: each name line starts a new entry; metadata
            # lines seen before the first name line land in this throwaway
            # struct (never appended), matching the original behavior.
            struct = {'name': '', 'imgs': []}
            for detail in details:
                value = detail.extract()
                if "名稱" in value or "名称" in value:  # name (trad./simpl.)
                    struct = {'name': value, 'imgs': []}
                    item['details'].append(struct)
                elif "格式" in value:  # format (duplicate test removed)
                    struct['format'] = value
                elif "大小" in value:  # size (duplicate test removed)
                    struct['size'] = value
                elif "时长" in value or "时间" in value:  # duration / time
                    struct['time'] = value
                elif value.startswith('http'):  # also covers 'https'
                    struct['imgs'].append(value)
        else:
            # Single-file post. The original fell back to the *same*
            # literal for format and size; only the name and duration
            # labels genuinely have two variants.
            print("detail")
            print(details)
            name = self.get_text(details, "名稱") or self.get_text(details, "名称")
            vformat = self.get_text(details, "格式")
            size = self.get_text(details, "大小")
            duration = self.get_text(details, "时长") or self.get_text(details, "时间")

            # Images can appear under either lazy-load attribute.
            imgs = []
            for attr in ("data-src", "ess-data"):
                nodes = response.xpath(
                    "//div[@class='tpc_content do_not_catch']//img/@" + attr)
                for img in nodes:
                    imgs.append(img.extract())

            item['details'].append({
                "name": name,
                "vformat": vformat,
                "time": duration,
                "size": size,
                "imgs": imgs,
            })

        # External (download) links are styled green on the forum; both
        # branches collected them identically, so do it once here.
        links = response.xpath("//a[@style='cursor:pointer;color:#008000;']/text()")
        for link in links:
            item['links'].append(link.extract())

        print(item)
        yield item

    @staticmethod
    def get_text(li, value):
        """Return the first extracted string in *li* containing *value*, else ''."""
        for node in li:
            text = node.extract()  # extract once, not twice
            if value in text:
                return text
        return ""
