import requests,re,os,time
import http.cookiejar as cj
from  pyquery import PyQuery as  pq
from hashlib import md5
from config.setting import *
from pixiv.file_select_download import FileSelectDownload
from config.user_setting import TRY_LOGIN_COUNT,GOOD_IMAGE_PERCENTAGE,SET_VIEW_COUNT,SET_BOOKMARK_COUNT,SET_LIKE_COUNT,SET_MIN_VIEW_COUNT,HOT_WORD
from config.user_setting import USERNAME,PASSWORD
class Pixiv():
    '''
    Pixiv crawler: logs in (reusing a saved cookie jar when possible) and
    downloads illustrations from the rankings, the discovery feed, keyword
    searches, or a specific artist's profile.

    :param username: account name; defaults to USERNAME from the config file
    :param password: password; defaults to PASSWORD from the config file
    :param is_proxies: route traffic through the configured proxies (default: off)
    :param cookie: reuse/persist cookies in log/cookies.txt (default: on)
    '''
    def __init__(self, username=USERNAME, password=PASSWORD, is_proxies=False, cookie=True):
        s = requests.session()
        s.headers = base_header
        # Do not keep idle connections alive.
        s.keep_alive = False
        if is_proxies:
            s.proxies = proxies
        # `data` is the shared login form dict imported from config.setting.
        data['pixiv_id'] = username
        data['password'] = password
        self.s = s
        self.cookie = cookie
        self.select_download = FileSelectDownload(s)
        self.login()

    def _get_with_retry(self, url, params=None, fail_msg='获取url页面失败', attempts=8):
        '''
        GET `url` with up to `attempts` retries, sleeping 3 seconds between
        failures and printing `fail_msg` for each failed attempt.  Exits the
        process when every attempt fails, since the crawler cannot continue
        without the response.  (Previously several call sites had no
        exhaustion handling and crashed with a NameError instead.)

        :param url: absolute URL to fetch
        :param params: optional query-string dict
        :param fail_msg: per-attempt failure message to print
        :param attempts: maximum number of tries
        :return: the requests.Response of the first successful attempt
        '''
        for i in range(attempts):
            try:
                return self.s.get(url, params=params)
            except Exception:
                print(fail_msg, '3秒后开始', '第', (i + 1), '尝试')
                time.sleep(3)
        print('网络连接错误')
        exit(0)

    def login_real(self):
        '''Log in with the account credentials and save cookies to log/cookies.txt.'''
        # Use an LWPCookieJar so the cookies can be persisted to disk.
        self.s.cookies = cj.LWPCookieJar()
        login_url = 'https://accounts.pixiv.net/login'
        res = self.s.get(login_url)
        if res.status_code == 200:
            # The login form embeds a one-time post_key in the page source.
            post_key = re.findall('postKey":"(.*?)"', res.text)[0]
            print(post_key)
            data['post_key'] = post_key
            self.s.post('https://accounts.pixiv.net/api/login?lang=zh', data=data)
            res = self.s.get('https://www.pixiv.net/')
            if res.status_code == 200:
                # Persist cookies so later runs can skip the password login.
                self.s.cookies.save(filename='log/cookies.txt', ignore_discard=True, ignore_expires=True)
                print("cookie保存完成")
            print(res.url)
        else:
            print('连接失败')

    def login_cookie(self):
        '''
        Try to log in by loading the saved cookie jar.

        :return: True when the cookie-based session reaches pixiv.net, else False
        '''
        self.s.cookies = cj.LWPCookieJar(filename='log/cookies.txt')
        print('使用cookie登录')
        try:
            self.s.cookies.load(filename='log/cookies.txt', ignore_discard=True)
            res = self.s.get('https://www.pixiv.net/')
            if res.status_code == 200:
                print('使用cookie登录成功')
                return True
            return False
        except Exception:
            # Missing/corrupt cookie file or a network failure: fall back
            # to the password login.
            print('使用cookie登录失败')
            print('开始通过账号密码登陆')
            return False

    def login(self):
        '''
        Log in, preferring the saved cookies when enabled.  Retries up to
        TRY_LOGIN_COUNT times on errors, then gives up and exits.
        '''
        try_count = TRY_LOGIN_COUNT
        while try_count:
            try:
                if os.path.exists('log/cookies.txt') and self.cookie:
                    if not self.login_cookie():
                        # Cookie login failed: refresh the cookie file with a
                        # real login, then loop to re-verify via the cookie.
                        self.login_real()
                    else:
                        break
                else:
                    self.login_real()
                    break
            except Exception:
                print("登陆出现问题,10秒后再一次尝试登陆")
                try_count -= 1
                time.sleep(10)
        else:
            # while/else: only reached when try_count drops to zero.
            print('登陆尝试次数过多，请重新检查网络连接')
            exit(0)

    def parse_page(self, page=-1, mode='daily', rank=-1):
        '''
        Crawl the overall ranking.

        :param page: page number (50 works per page)
        :param mode: which ranking to crawl; defaults to the daily ranking.
                     Other values: weekly, monthly, rookie, original,
                     male, female
        :param rank: crawl up to this rank position; do not pass together
                     with `page`
        '''
        # Validate the page/rank combination.
        if page == -1 and rank == -1:
            print('page或rank没有传入参数')
            return None
        elif page == -1 and rank != -1:
            # Each ranking page holds 50 works.
            page = int(rank / 50) + 1
        page_data = {
            'mode': mode,
            'type': 'all',
            'p': page
        }
        res = self._get_with_retry('https://www.pixiv.net/ranking.php', params=page_data)
        print(res.url)
        doc = pq(res.text)
        items = doc('._layout-thumbnail img').items()
        count = 0
        for item in items:
            # When `rank` is set, stop downloading past that position.
            if count < rank or rank == -1:
                url = str(item.attr('data-src'))
                url = self.parse_image_url(url)
                self.select_download.downLoad(url)
            count += 1
        print(count)

    def parse_page2(self, size, GOOD_IMAGE_FILTER=False):
        '''
        Crawl the discovery (recommendation) feed; repeated calls return
        different works.

        :param size: number of works to fetch, capped at 954
        :param GOOD_IMAGE_FILTER: enable the quality filter
        '''
        if size > 954:
            print("爬取数量太多")
            return
        url = 'https://www.pixiv.net/rpc/recommender.php?type=illust&sample_illusts=auto&num_recommendations=1000&page=discovery&mode=all'
        base_header['Referer'] = 'https://www.pixiv.net/discovery'
        # Exits on repeated network failure instead of crashing on an
        # undefined response (the old loop had no exhaustion branch).
        res = self._get_with_retry(url)
        illust_ids = res.json().get('recommendations')
        count = 0
        for illust in illust_ids:
            if count < size:
                # Filtered-out works do not count towards `size`.
                if not self.parse_image_info(illust, GOOD_IMAGE_FILTER):
                    continue
            count += 1

    def parse_page3(self, word, page=1, GOOD_IMAGE_FILTER=False, hot_word=False):
        '''
        Crawl search results.

        :param word: search keyword, or an integer index into HOT_WORD when
                     `hot_word` is True
        :param page: result page number
        :param GOOD_IMAGE_FILTER: enable the quality filter
        :param hot_word: when True, `word` is an index into the HOT_WORD
                         list from user_setting.py
        '''
        if hot_word:
            try:
                word = HOT_WORD[word]
            except (IndexError, TypeError):
                print('输入索引不正确')
                return
        search_data = {
            'word': word,
            'order': 'date_d',
            'p': page
        }
        url = 'https://www.pixiv.net/search.php'
        res = self.s.get(url, params=search_data)
        # Pixiv renders ids in two different markups; collect both.
        illusts = re.findall('illustId&quot;:&quot;(.*?)&quot', res.text, re.S)
        illusts.extend(re.findall('illust"data-id="(.*?)"', res.text, re.S))
        # Deduplicate before downloading.
        for illust in {int(i) for i in illusts}:
            self.parse_image_info(illust, GOOD_IMAGE_FILTER)

    def parse_page4(self, id, manga=False, GOOD_IMAGE_FILTER=False):
        '''
        Crawl every work of a given artist.

        :param id: the artist's user id
        :param manga: also crawl the artist's manga works
        :param GOOD_IMAGE_FILTER: enable the quality filter
        '''
        # e.g. https://www.pixiv.net/ajax/user/21062/profile/all
        # The JSON body maps illust/manga ids (as strings) to metadata.
        url = 'https://www.pixiv.net/ajax/user/' + str(id) + '/profile/all'
        body = self.s.get(url).json().get('body')
        illusts = [int(i) for i in body.get('illusts').keys()]
        if manga:
            illusts.extend(int(i) for i in body.get('manga').keys())
        for illust in illusts:
            self.parse_image_info(illust, GOOD_IMAGE_FILTER)

    def good_image(self, id):
        '''
        Fetch a work's page and decide whether it passes the quality filter.

        :param id: illustration id
        :return: True when the work meets the configured thresholds
        '''
        params = {
            'mode': 'medium',
            'illust_id': id
        }
        res = self._get_with_retry('https://www.pixiv.net/member_illust.php',
                                   params=params, fail_msg='判断过滤失败')
        text = res.text
        # The counters are embedded in the page's inline JSON.
        bookmarkCount = int(re.findall('"bookmarkCount":(.*?),', text)[0])
        likeCount = int(re.findall('likeCount":(.*?),', text)[0])
        viewCount = int(re.findall('viewCount":(.*?),', text)[0])
        if self.good_image_calculate(bookmarkCount, likeCount, viewCount):
            return True
        print('按照过滤标准过滤--这张图片', res.url)
        return False

    def good_image_calculate(self, bookmarkCount, likeCount, viewCount):
        '''
        Apply the configured quality thresholds.

        A work passes when its (bookmarks + likes) / views percentage reaches
        GOOD_IMAGE_PERCENTAGE with at least SET_MIN_VIEW_COUNT views, or when
        any single counter reaches its absolute threshold.

        :return: True when the work passes, else False
        '''
        # Check the view count first: this also guards the division below
        # against viewCount == 0 (previously a ZeroDivisionError).
        if viewCount > 0 and viewCount >= SET_MIN_VIEW_COUNT \
                and int((bookmarkCount + likeCount) / (viewCount * 1.0) * 100) >= GOOD_IMAGE_PERCENTAGE:
            return True
        return viewCount >= SET_VIEW_COUNT or likeCount >= SET_LIKE_COUNT \
            or bookmarkCount >= SET_BOOKMARK_COUNT

    def parse_image_url(self, url):
        '''
        Convert a thumbnail URL into the original-resolution URL, falling
        back to .png when the .jpg original does not exist.
        '''
        url = url.replace('c/240x480', '')
        url = url.replace('img-master', 'img-original')
        url = url.replace('_master1200', '')
        base_header['Referer'] = url
        if self.s.get(url).status_code != 200:
            # Swap only the extension, not any other 'jpg' in the path.
            url = url.replace('.jpg', '.png')
        return url

    def is_more_image(self, id):
        '''
        :param id: illustration id
        :return: True when the work contains multiple images (its manga
                 page exists)
        '''
        user_id = {
            'mode': 'manga',
            'illust_id': id
        }
        res = self._get_with_retry('https://www.pixiv.net/member_illust.php',
                                   params=user_id, fail_msg='判断是否有多张图片失败')
        # Pixiv answers with this sentence when the work has a single image.
        return len(re.findall('您所指定的ID没有投稿多张作品', res.text)) == 0

    def parse_image_info(self, id, GOOD_IMAGE_FILTER):
        '''
        Fetch a work's page, optionally filter it by quality, and download
        every image it contains.

        :param id: illustration id
        :param GOOD_IMAGE_FILTER: enable the quality filter
        :return: False when filtered out, True otherwise
        '''
        user_id = {
            'mode': 'medium',
            'illust_id': id
        }
        res = self._get_with_retry('https://www.pixiv.net/member_illust.php', params=user_id)
        html = res.text
        bookmarkCount = int(re.findall('"bookmarkCount":(.*?),', html)[0])
        likeCount = int(re.findall('likeCount":(.*?),', html)[0])
        viewCount = int(re.findall('viewCount":(.*?),', html)[0])

        # Work-level quality filter.
        if GOOD_IMAGE_FILTER and not self.good_image_calculate(bookmarkCount, likeCount, viewCount):
            print('按照过滤标准过滤--这张图片', res.url)
            return False
        if self.is_more_image(id):
            # Multi-image work: walk the manga page thumbnails.
            doc = pq(res.text)
            for item in doc('.item-container img').items():
                url = self.parse_image_url(item.attr('data-src'))
                print(url)
                self.select_download.downLoad(url)
        else:
            # Single image: the original URL is embedded in the page JSON.
            url = re.findall('original":"(.*?)"', html)[0].replace('\\', '')
            self.select_download.downLoad(url)
        return True
