#!/usr/bin/env python
# -*- coding: utf-8 -*-


import json
import logging.config
import os

import sys
import warnings
from datetime import date, datetime, timedelta


import requests
from requests.adapters import HTTPAdapter
import pymysql
import const

from util.notify import push_msg
from concurrent.futures import ThreadPoolExecutor,as_completed
import threading
warnings.filterwarnings("ignore")

# Create the log directory if it does not exist (logging.conf writes there).
if not os.path.isdir("log/"):
    os.makedirs("log/")
# Configure logging from "logging.conf" located next to this script, then
# grab the named "twitter" logger used throughout this module.
logging_path = os.path.split(os.path.realpath(__file__))[0] + os.sep + "logging.conf"
logging.config.fileConfig(logging_path)
logger = logging.getLogger("twitter")


class twitterDownCrawler(object):
    """Download Twitter media files recorded in MySQL to the local disk.

    Pending rows are read from ``tw_user_media`` (``down = 0``), fetched
    through a local HTTP proxy by a small thread pool, and each row is
    marked ``down = 1`` once the file exists locally.
    """

    def __init__(self, config):
        self.pic_download = config["pic_download"]      # 0 or 1: 1 downloads original photos
        self.video_download = config["video_download"]  # 0 or 1: 1 downloads videos
        self.down_dir = config["down_dir"]              # root download directory
        self.re_down_file = config["re_down_file"]      # truthy: purge broken files first

        # Headers used when fetching media files.
        self.down_headers = {
            "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:103.0) Gecko/20100101 Firefox/103.0",
        }

        # MySQL connection; the config block is required by the queries below.
        self.mysql_config = config.get("mysql_config")
        self.con = pymysql.connect(host=self.mysql_config["host"],
                                   user=self.mysql_config["user"],
                                   passwd=self.mysql_config["password"],
                                   db=self.mysql_config["db"],
                                   port=self.mysql_config["port"],
                                   charset=self.mysql_config["charset"])
        # Single shared cursor; guarded by self.lock when used from workers.
        self.cur = self.con.cursor()

        self.threadPool = ThreadPoolExecutor(max_workers=5, thread_name_prefix="down_")
        self.lock = threading.Lock()

    def start(self):
        """Run the downloader: optionally purge broken files, then download."""
        try:
            if self.re_down_file:
                # FIX: log only when the cleanup actually runs (was logged
                # unconditionally before the flag check).
                logger.info("删除下载错误文件，重新下载")
                self.re_down_files(self.down_dir)
            if self.pic_download:
                self.get_download_files()
        except Exception as e:
            logger.exception(e)

    def stop(self):
        """Release the thread pool and the MySQL cursor/connection."""
        try:
            # FIX: the executor was never shut down, leaking worker threads.
            self.threadPool.shutdown(wait=True)
            self.cur.close()
            self.con.close()
        except Exception as e:
            logger.exception(e)

    def initialize_info(self, user_config):
        """Remember the user id this crawler instance works on."""
        self.userid = user_config["user_id"]

    def get_download_files(self):
        """Fetch pending photo rows from MySQL and download them in parallel."""
        try:
            self.cur.execute("""
                SELECT
                    media.id,media.user_id,media.media_url_https,media.type
                FROM
                    tw_user_media media left join tw_my_follow tmf on media.user_id =tmf.user_id 
                where
                media.type = "photo" 
               
                and down = 0
                    and 
                    exists (
                    select
                        1
                    from
                        tw_my_follow e
                    where
                        e.user_id = media.user_id)
                    and not exists (
                    select
                        1
                    from
                        tw_user_exclude e
                    where
                        e.user_id = media.user_id)
                        
                        limit 10000
                        """)
            res = self.cur.fetchall()
            futures = [
                self.threadPool.submit(self.download_one_file_line, line)
                for line in res
            ]
            # Touch every result so worker exceptions propagate here.
            for future in as_completed(futures):
                future.result()
        except Exception as e:
            logger.error("同步用户数据失败！")
            logger.exception(e)

    def _download_media(self, url, type, userid, on_done=None):
        """Shared download core for a single media file (photo/video).

        Returns 1 on success or skip, None on an unexpected 404, 0 on error.
        ``on_done`` (if given) is invoked before every successful return so
        the caller can mark the corresponding DB row as downloaded.
        """
        try:
            if type == "photo":
                # NOTE(review): assumes a 4-character extension like ".jpg";
                # a ".jpeg" URL would be truncated incorrectly — confirm the
                # stored URLs always carry 3-letter extensions.
                url = url[:len(url) - 4] + ".png"

            name = url.rsplit("/", 1)[-1]
            file_down_path = self.get_filepath(type, userid) + os.sep + name

            # Already on disk: just report success.
            if os.path.exists(file_down_path):
                if on_done is not None:
                    on_done()
                return 1

            logger.info("下载中：" + file_down_path)
            logger.info(str(datetime.now()))
            # Local proxy used to reach Twitter's CDN.
            proxies = {'http': 'http://127.0.0.1:10887', 'https': 'http://127.0.0.1:10887'}
            s = requests.Session()
            s.mount(url, HTTPAdapter(max_retries=5))

            downloaded = None
            for _ in range(3):  # up to 3 requests for truncated payloads
                downloaded = s.get(
                    url, headers=self.down_headers, proxies=proxies, verify=False
                )
                if downloaded.status_code == 404 and downloaded.reason == 'Not Found':
                    logger.exception("Not Found")
                    if on_done is not None:
                        on_done()
                    return 1
                if downloaded.status_code == 404:
                    return None
                # A complete JPEG ends with FF D9 and a complete PNG with the
                # IEND trailer; anything else means a truncated body — retry.
                truncated = (
                    url.endswith(("jpg", "jpeg"))
                    and not downloaded.content.endswith(b"\xff\xd9")
                ) or (
                    url.endswith("png")
                    and not downloaded.content.endswith(b"\xaeB`\x82")
                )
                if not truncated:
                    break

            # The last attempt is written out even if still truncated,
            # matching the original retry-then-give-up behaviour.
            if not os.path.exists(file_down_path):
                with open(file_down_path, "wb") as f:
                    f.write(downloaded.content)
            if on_done is not None:
                on_done()
            return 1
        except Exception as e:
            logger.exception(e)
            return 0

    def download_one_file(self, url, type, userid):
        """Download a single file (photo/video) without touching the DB."""
        return self._download_media(url, type, userid)

    def download_one_file_line(self, line):
        """Download the row ``(id, user_id, url, type)`` and mark it done."""
        return self._download_media(
            line[2], line[3], line[1],
            on_done=lambda: self.update_file_line(line),
        )

    def update_file_line(self, line):
        """Mark a media row as downloaded; safe to call from worker threads."""
        # FIX: context manager releases the lock even when execute/commit
        # raises (the old acquire/release pair could leak the lock).
        with self.lock:
            self.cur.execute(
                ' UPDATE tw_user_media SET down=1 WHERE id = %s', line[0])
            self.con.commit()

    def get_filepath(self, type, user_id):
        """Return (and create) the download path for a media type and user."""
        try:
            if len(self.down_dir) < 1:
                # Default to a "twitter" folder next to this script.
                self.down_dir = (
                        os.path.split(os.path.realpath(__file__))[0]
                        + os.sep
                        + "twitter"
                )

            file_dir = self.down_dir
            if type == "photo" or type == "video":
                file_dir = file_dir + os.sep + type + os.sep + user_id
            if not os.path.isdir(file_dir):
                os.makedirs(file_dir)
            if type == "photo" or type == "video":
                return file_dir
            file_path = file_dir + os.sep + user_id + "." + type + os.sep + user_id
            return file_path
        except Exception as e:
            logger.exception(e)

    def re_down_files(self, url):
        """Recursively remove files under ``url`` smaller than 100 bytes and
        reset their ``down`` flag in MySQL so they are downloaded again."""
        for f in os.listdir(url):
            real_url = os.path.join(url, f)
            if os.path.isfile(real_url):
                if os.path.getsize(real_url) < 100:
                    logger.info(os.path.abspath(real_url))
                    try:
                        # NOTE(review): splitting on "/" and taking index 9
                        # assumes a fixed absolute-path depth and POSIX
                        # separators — verify against the deployment layout.
                        dirs = real_url.split("/")
                        downUrl = "https://pbs.twimg.com/media/" + dirs[9]
                        downUrl = downUrl[:len(downUrl) - 4] + ".jpg"
                        self.cur.execute("""
                                                UPDATE tw_user_media set down=0 WHERE media_url_https = %s
                                            """ , downUrl)
                        self.con.commit()
                        os.remove(real_url)
                    except Exception as e:
                        logger.exception(e)
            elif os.path.isdir(real_url):
                # Recurse into sub-directories.
                self.re_down_files(real_url)
            else:
                logger.info("其他情况")
# Load the crawler configuration from disk.
def get_config():
    """Load and return twitterDown.config.json from this script's directory.

    Exits the process when the file is missing or is not valid JSON.
    """
    base_dir = os.path.split(os.path.realpath(__file__))[0] + os.sep
    config_path = base_dir + "twitterDown.config.json"

    if not os.path.isfile(config_path):
        logger.warning(
            "当前路径：%s 不存在配置文件 twitterDown.config.json",
            base_dir,
        )
        sys.exit()

    try:
        with open(config_path, encoding="utf-8") as f:
            return json.load(f)
    except ValueError:
        logger.error(
            "config.json 格式不正确!"
        )
        sys.exit()




def main():
    """Entry point: load config, run the downloader once, then notify."""
    try:
        logger.info(str(datetime.now()))
        crawler = twitterDownCrawler(get_config())
        crawler.start()   # run the download pass
        crawler.stop()    # release DB / thread-pool resources
        logger.info(str(datetime.now()))
        logger.info("=======数据抓取完毕=======")
        if const.NOTIFY["NOTIFY"]:
            push_msg("更新了一次微博")
    except Exception as e:
        if const.NOTIFY["NOTIFY"]:
            push_msg("weibo-crawler运行出错，错误为{}".format(e))
        logger.exception(e)


if __name__ == "__main__":
    main()
