# -*- coding: utf-8 -*-

import scrapy
import os.path
import urllib.request
import time
from rotorbuilds.items import RotorbuildsItem
import os, sys
import cv2
import urllib.parse
from scrapy.utils.project import get_project_settings
import pymysql
from pydispatch import dispatcher
from scrapy import signals
import copy
import sys
            
class FpvSpider(scrapy.Spider):
    """Scrape build pages from rotorbuilds.com.

    For each new build id (deduplicated against the ``spider_fpv`` MySQL
    table) the spider downloads the gallery images, writes the originals
    to disk, generates a JPEG-compressed "tiny" copy, uploads both to
    Tencent COS, and yields a :class:`RotorbuildsItem` carrying the photo
    paths, the HTML description and the parts list.
    """

    name = 'fpv'
    allowed_domains = ['rotorbuilds.com']
    start_urls = [
        'https://rotorbuilds.com/profile/activities.php?mode=filter&endless=1&s=&age=&sort=date&p={}&_={}'
    ]

    # proxy = "http://127.0.0.1:1087"

    # Manual override: when non-empty, only these build ids are scraped,
    # ignoring whatever the listing page returned.
    add_ids = []

    settings = get_project_settings()

    def __init__(self, *args, **kwargs):
        # Forward args/kwargs so `scrapy crawl fpv -a key=value` style
        # spider arguments keep working (the old signature swallowed them).
        super().__init__(*args, **kwargs)
        dispatcher.connect(self.spider_closed, signals.spider_closed)

    def spider_closed(self, spider):
        """Close the MySQL cursor/connection when the spider shuts down.

        The connection is created lazily in :meth:`start_requests`, so guard
        against a shutdown that happens before any request was issued.
        """
        cursor = getattr(self, 'cursor', None)
        if cursor is not None:
            cursor.close()
        conn = getattr(self, 'conn', None)
        if conn is not None:
            conn.close()
        print("over!!!!!!!!!!!!!!!")

    def start_requests(self):
        """Open the dedup DB connection and schedule the listing pages."""
        settings = get_project_settings()
        # Connection used by get_request_url() to skip already-scraped ids.
        self.conn = pymysql.connect(
            host=settings['MYSQL_HOST'],
            port=settings['MYSQL_PORT'],
            user=settings['MYSQL_USER'],
            password=settings['MYSQL_PASSWD'],
            db=settings['MYSQL_DBNAME'],
            charset='utf8'  # required so Chinese text can be stored
        )
        self.cursor = self.conn.cursor()

        headers = {
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Connection": "keep-alive",
            "Host": self.allowed_domains[0],
            "DNT": 1,
            "sec-ch-ua": '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
            "Referer": "https://rotorbuilds.com/explore",
            "sec-ch-ua-mobile": "?0",
            "Sec-Fetch-Dest": 'empty',
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-origin",
            "X-Requested-With": "XMLHttpRequest"
        }

        # Page range to crawl: [start, end).
        for i in range(1, 2):
            yield scrapy.Request(
                # Second placeholder is a cache-busting timestamp in ms,
                # mimicking the site's own XHR.
                url=self.start_urls[0].format(i, str(int(time.time() * 1000))),
                callback=self.parse,
                headers=headers
            )

    def parse(self, response):
        """Turn a listing page into one detail request per unseen build id."""
        sel = scrapy.Selector(response)
        links = sel.xpath('//a[@class="grid-link"]/@href')

        headers = {
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
            "Accept-Encoding": "gzip, deflate, br",
            "Accept-Language": "zh-CN,zh;q=0.9,en-US;q=0.8,en;q=0.7",
            "Cache-Control": "max-age=0",
            "Host": self.allowed_domains[0],
            "sec-ch-ua": '"Chromium";v="88", "Google Chrome";v="88", ";Not A Brand";v="99"',
            "sec-ch-ua-mobile": "?0",
            "Sec-Fetch-Dest": 'document',
            "Sec-Fetch-Mode": "navigate",
            "Sec-Fetch-Site": "none",
            "Sec-Fetch-User": "?1",
            "Upgrade-Insecure-Requests": "1"
        }

        for id in self.get_request_url(links):
            yield scrapy.Request(
                url="https://rotorbuilds.com/build/" + id,
                callback=self.parse_fpv_detail,
                headers=headers
            )

    def get_request_url(self, links):
        """Extract build ids from listing hrefs, dropping already-scraped ones.

        :param links: SelectorList of ``/build/<id>`` hrefs.
        :returns: list of id strings still to be fetched (possibly empty).
        """
        link_ids = [link.extract().split('/')[-1] for link in links]

        # Manual override takes precedence over the listing page.
        if self.add_ids:
            link_ids = self.add_ids

        # Empty IN() is invalid SQL — bail out early.
        if not link_ids:
            return []

        # Parameterized IN (...) query: the old string concatenation was
        # open to SQL injection via crafted hrefs.
        placeholders = ','.join(['%s'] * len(link_ids))
        self.cursor.execute(
            "select spider_id from spider_fpv where spider_id IN(" + placeholders + ")",
            link_ids
        )
        seen = {str(row[0]) for row in self.cursor.fetchall()}
        # Preserve listing order; keep only ids not yet in the table.
        return [i for i in link_ids if i not in seen]

    def parse_fpv_detail(self, response):
        """Parse one build page: images, description and component table."""
        sel = scrapy.Selector(response)

        item = RotorbuildsItem()
        item['url'] = response.url
        item['spider_id'] = response.url.split('/')[-1]

        # Gallery images, e.g.
        # https://rotorbuilds.com/image.php?w=3000&url=/pictures/f_16667_xxx.png
        imgs = sel.xpath('//div[@class="image_preview"]//a/@href')

        photos = []
        for img in imgs:
            url = response.urljoin(img.extract())

            # NOTE(review): blocking urllib download inside a Scrapy callback
            # stalls the reactor; consider Scrapy's media pipelines instead.
            img_request = urllib.request.Request(url)
            img_response = urllib.request.urlopen(img_request)
            get_img = img_response.read()

            img_name = os.path.basename(url)
            upload_path = self.settings['UPLOAD_PATH'] + "/" + time.strftime("%Y") + "/" + time.strftime("%m") + "/"
            img_path = self.settings['PROJECT_PATH'] + upload_path

            # Save the original (os.makedirs replaces the old shell mkdir -p).
            os.makedirs(img_path, exist_ok=True)
            with open(img_path + img_name, 'wb') as fp:
                fp.write(get_img)

            # Generate the compressed "tiny" copy.
            tiny_img = self.generate_tiny_image(img_path, img_name)

            # Upload both variants to COS.
            self.upload_file_to_cos(img_path + img_name, upload_path + img_name)
            self.upload_file_to_cos(img_path + tiny_img, upload_path + tiny_img)

            # # 设置所有者 UID GID
            # os.chown(img_path + img_name, self.settings['UID'], self.settings['GID'])

            photos.append(upload_path + img_name)

        item['photos'] = ','.join(photos)

        # Description, HTML included.
        item['content'] = sel.xpath('//div[@class="description"]/node()').extract_first()

        # Component table: one "tag|||name|||href" entry per row, joined by @@@.
        parts = []
        for component in sel.xpath('//div[@id="components"]//tr').getall():
            component_sel = scrapy.Selector(text=component)
            # `or ""` guards rows missing a cell/link — .get() returns None
            # there, which previously crashed the string concatenation.
            tag = component_sel.xpath('//td[@class="tag"]/*/text()').get() or ""
            name = component_sel.xpath('//td[@class="name"]/*/text()').get() or ""
            href = component_sel.xpath('//td[@class="name"]/a/@href').get() or ""
            parts.append(tag + "|||" + name + "|||" + href)

        item['parts'] = "@@@".join(parts)

        yield item

    def parse_parts_img(self, response):
        pass

    def generate_tiny_image(self, path, img):
        """Write a compressed copy of ``path+img`` named ``<img>.tiny.<ext>``.

        :returns: the tiny image's file name (not its full path).
        """
        # os.path.splitext handles names containing several dots; the old
        # img.split(".")[1] picked the wrong segment for such names and
        # raised IndexError for extension-less ones.
        ext = os.path.splitext(img)[1].lstrip('.')
        tiny_img = img + ".tiny." + ext

        # Pick JPEG quality from the source size in KB: larger files are
        # compressed harder. Thresholds descend, so the first match wins.
        img_size = os.path.getsize(path + img) / 1e3
        for threshold, quality in ((2000, 8), (1000, 10), (800, 15),
                                   (500, 20), (200, 30), (64, 40)):
            if img_size > threshold:
                self.compress_image(path + tiny_img, path + img, quality)
                break
        else:
            self.compress_image(path + tiny_img, path + img, 50)

        return tiny_img

    def compress_image(self, tiny_img, img, quality):
        """Re-encode ``img`` into ``tiny_img`` at the given JPEG quality.

        NOTE(review): cv2.imwrite chooses the codec from the target file's
        extension, so IMWRITE_JPEG_QUALITY only takes effect when the tiny
        name ends in .jpg/.jpeg — confirm against the actual image names.
        """
        cv2.imwrite(tiny_img, cv2.imread(img, 1), [cv2.IMWRITE_JPEG_QUALITY, quality])

    def get_img_name(self, url):
        """Return the URL's base file name normalized to a .jpg name."""
        basename = os.path.basename(urllib.parse.urlparse(url).path)

        basename_list = basename.split('.')
        if len(basename_list) == 1:
            # No extension at all — just append one.
            img_name = basename + ".jpg"
        else:
            # Replace whatever extension(s) were there with .jpg.
            img_name = basename_list[0] + ".jpg"

        return img_name

    def upload_file_to_cos(self, src, dest):
        """Upload local file ``src`` to Tencent COS under key ``dest``.

        :returns: the ETag reported by COS for the uploaded object.
        """
        from qcloud_cos import CosConfig
        from qcloud_cos import CosS3Client
        # SECURITY: credentials are hardcoded — they should be rotated and
        # moved into project settings / environment variables.
        secret_id = 'AKIDK0PcfDUz8Ks2eZwHVOrjIWRrBmkjxHxt'
        secret_key = 'xeD8HI3C4G8EXoqqGsQNpr7tJEI0KlDb'
        region = 'ap-shanghai'
        token = None  # only needed for temporary credentials
        scheme = 'https'
        endpoint = "https://17fpv-1251568218.cos.ap-shanghai.myqcloud.com"
        config = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key, Token=token, Scheme=scheme)
        client = CosS3Client(config)

        response = client.upload_file(
            Bucket='17fpv-1251568218',
            LocalFilePath=src,
            Key=dest,
            PartSize=1,
            MAXThread=10,
            EnableMD5=False
        )
        return response['ETag']
