# import libraries 

from bs4 import BeautifulSoup
import requests
import time
import datetime

import smtplib

import json
import random
from scrapy import Selector
import time
import threading
import queue

def task_one_page(asin, page, ip_port):
    """Fetch one page of Amazon reviews for *asin* through the given proxy.

    Parameters
    ----------
    asin : str
        Amazon product id.
    page : int
        1-based review page number.
    ip_port : str
        Proxy endpoint formatted as "ip:port".

    Returns
    -------
    bool
        True when the HTTP request succeeded (status 200), False otherwise.
    """
    proxy = {
        'http': 'http://{}'.format(ip_port),
        'https': 'http://{}'.format(ip_port)
    }

    headers = {
        'authority': 'www.amazon.com',
        "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
        "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
    }

    post_data = {
        "sortBy": "recent",
        "reviewerType": "all_reviews",
        "formatType": "",
        "mediaType": "",
        "filterByStar": "",
        "filterByAge": "",
        "pageNumber": 10,
        "filterByLanguage": "",
        "filterByKeyword": "",
        "shouldAppend": "undefined",
        "deviceType": "desktop",
        "canShowIntHeader": "undefined",
        "pageSize": "10",
        "asin": "",
        "scope": "reviewsAjax0"
    }
    # Page-turning payload fields.  BUG FIX: the original assignments ended
    # with trailing commas, which silently stored 1-tuples instead of the
    # intended scalar values.
    post_data["pageNumber"] = page
    post_data["reftag"] = f"cm_cr_getr_d_paging_btm_next_{page}"
    post_data["scope"] = f"reviewsAjax{page}"
    post_data["asin"] = asin
    # Pagination endpoint for the reviews AJAX service.
    spiderurl = f'https://www.amazon.com/hz/reviews-render/ajax/reviews/get/ref=cm_cr_arp_d_paging_btm_next_{page}'

    success_flag = False
    try:
        # Timeout added so a dead proxy cannot hang the caller forever.
        res = requests.post(spiderurl, headers=headers, data=post_data,
                            proxies=proxy, timeout=30)
        if res.status_code == 200:
            body = res.content.decode('utf-8')
            success_flag = True
            # The AJAX response is a '&&&'-separated stream of chunks.
            for content in body.split('&&&'):
                infos = content.split('","')
                info = infos[-1].replace('"]', '').replace('\\n', '').replace('\\', '')
                # Only chunks that carry a review card are parsed.
                if 'data-hook="review"' in info:
                    sel = Selector(text=info)
                    data = {}
                    data['username'] = sel.xpath('//span[@class="a-profile-name"]/text()').extract_first()  # reviewer name
                    data['point'] = sel.xpath('//span[@class="a-icon-alt"]/text()').extract_first()  # star rating
                    data['date'] = sel.xpath('//span[@data-hook="review-date"]/text()').extract_first()  # date/location line
                    data['review'] = sel.xpath('//span[@data-hook="review-title"]/span/text()').extract_first()  # review title
                    data['detail'] = sel.xpath('//span[@data-hook="review-body"]').extract_first()  # review body html
                    image = sel.xpath('div[@class="review-image-tile-section"]').extract_first()
                    data['image'] = image if image else "not image"  # attached image, if any
                    print(data)
    except (requests.RequestException, UnicodeDecodeError):
        # Narrowed from a bare ``except:`` that swallowed even KeyboardInterrupt.
        print('something wrong happen!')

    return success_flag

def get_proxies_json():
    """Fetch a batch of proxies from the lunaproxy API.

    Returns
    -------
    list | None
        The list under the response's ``data`` key when the API reports
        success (``code == 0``), otherwise ``None``.
    """
    api = 'https://tq.lunaproxy.com/getflowip?neek=1219342&num=25&type=2&sep=1&regions=all&ip_si=2&level=1&sb='
    # Timeout added so a stalled vendor endpoint cannot hang the caller.
    resp = requests.get(api, timeout=30)
    # Parse the JSON payload directly instead of json.loads(resp.text).
    resp_dict = resp.json()
    # The vendor signals success with code == 0.
    if resp_dict['code'] == 0:
        return resp_dict['data']
    return None

def get_one_proxy():
    """Pick one random proxy from the API pool, formatted as "ip:port".

    Returns ``None`` when the API reported an error.
    """
    pool = get_proxies_json()
    if pool is None:
        return None
    chosen = random.choice(pool)
    return '{ip}:{port}'.format(ip=chosen['ip'], port=chosen['port'])


class ProxyManager:
    """Thread-safe pool of proxy endpoints ("ip:port") fetched from a vendor API.

    Each endpoint may be used by at most ``max_parallelism_cnt`` threads at a
    time.  ``get_one`` blocks on a condition variable until a vacant endpoint
    exists and reserves it; ``return_one`` releases it and can flag its IP as
    blocked so it is retired from the pool.
    """
    def __init__(self, max_parallelism_cnt=1, min_num_con_threads=3) -> None:
        # Previous vendor endpoint, kept for reference:
        # self._api = 'https://tq.lunaproxy.com/getflowip?neek=1219342&num=25&type=2&sep=1&regions=all&ip_si=2&level=1&sb='
        # Vendor API expected to return JSON of the shape
        #   {"code": 0, "data": [{"ip": "...", "port": ...}, ...]}
        self._api = 'http://need1.dmdaili.com:7771/dmgetip.asp?apikey=94f28555&pwd=f1f668bba8a18399ade56909d6bbd5e8&getnum=5&httptype=1&geshi=2&fenge=1&fengefu=&Contenttype=2&operate=all'
        # IPs confirmed blocked by the target site; never handed out again.
        self._blocked_ips = set()
        # self._ip_port_pool = set()
        # _ip_port_recorder layout:
        # {
        #   "192.168.0.1:1234": {
        #       "parallelism_cnt": 0
        #   } 
        # }
        self._ip_port_recorder = dict()
        self._lock = threading.Lock()
        # Signalled whenever an endpoint may have become vacant (new proxies
        # added, a borrow returned, or a blocked endpoint removed).
        self._has_vacant = threading.Condition(self._lock)
        self._max_parallelism_cnt = max_parallelism_cnt
        # minimum of degree of parallelism
        self._min_num_con_threads = min_num_con_threads  


    def _request_for_proxies(self):
        """Call the vendor API; return its proxy list, or None on API error.

        NOTE(review): no timeout is set and network/JSON exceptions propagate
        to the caller — confirm that is intended.
        """
        # retrieve ips using api
        resp = requests.get(self._api)
        resp_txt = resp.text
        # read as json
        resp_dict = json.loads(resp_txt)
        # check the content of the response json
        proxy_ls = []
        print('API is called!')
        if resp_dict['code'] == 0:
            proxy_ls = resp_dict['data']
            return proxy_ls
        else:
            return None

    # must be used with lock acquired
    def _refresh_pool_helper(self, proxy_ls):
        """Merge *proxy_ls* into the recorder, skipping known/blocked IPs.

        Caller MUST hold ``self._lock``.  Notifies one waiter per new
        endpoint added.
        """
        # gather all known ips
        valid_ips = {x.split(':')[0] for x in self._ip_port_recorder.keys()}
        
        known_ips = valid_ips | self._blocked_ips

        # add the new ip(with port) to vacant pool
        for d in proxy_ls:
            ip = d['ip']
            port = d['port']
            ip_port = '{ip}:{port}'.format(ip=ip, port=port)
            if ip not in known_ips:
                # initialize the parallelism_cnt to 0
                self._ip_port_recorder[ip_port] = {
                    "parallelism_cnt": 0
                }
                self._has_vacant.notify()  # notify the waiting threads 
            known_ips.add(ip)
        

    def refresh_pool(self):
        """Fetch a fresh batch from the API and merge it into the pool."""
        proxy_ls = self._request_for_proxies()
        if proxy_ls is not None:
            with self._lock:
                self._refresh_pool_helper(proxy_ls=proxy_ls)
            

    def get_one(self):
        """Block until a vacant endpoint exists, reserve it, and return it."""
        def search_one():
            # find the currently least used ip_port
            min_num_para = self._max_parallelism_cnt + 1
            ip_port_min_used = None
            
            for ip_port, details in self._ip_port_recorder.items():
                num_para = details['parallelism_cnt']
                if num_para < min_num_para:
                    ip_port_min_used = ip_port
                    min_num_para = num_para

            max_threads = self._max_parallelism_cnt * len(self._ip_port_recorder)   # number of threads that can be created
            
            # too few ips left
            if max_threads < self._min_num_con_threads:
                # NOTE(review): this HTTP call runs while self._lock is held,
                # stalling every other thread for the duration of the request
                # — confirm this is acceptable.
                proxy_ls = self._request_for_proxies()
                if proxy_ls is not None:
                        self._refresh_pool_helper(proxy_ls=proxy_ls)
            
            if ip_port_min_used is not None and min_num_para < self._max_parallelism_cnt:
                return ip_port_min_used     # a vacant ip_port found 
            else:
                return None
    

        with self._lock:    
            while True:
                res = search_one()
                if res is None:
                    # wait() releases the lock; a notify from refresh_pool /
                    # return_one wakes us to retry the search.
                    self._has_vacant.wait()
                else:
                    break
            self._ip_port_recorder[res]['parallelism_cnt'] += 1
            return res
        
    
    def return_one(self, ip_port, is_blocked):
        """Release *ip_port*; when *is_blocked*, retire its IP permanently.

        A blocked endpoint is removed from the recorder once its last
        borrower returns it.
        """
        with self._lock:
            ip, _ = ip_port.split(':')
            if is_blocked:
                self._blocked_ips.add(ip) # record the ip as being blocked
            self._ip_port_recorder[ip_port]['parallelism_cnt'] -= 1
            if not is_blocked:
                self._has_vacant.notify()  # notify the waiting threads 
            if ip in self._blocked_ips and self._ip_port_recorder[ip_port]['parallelism_cnt'] == 0:
                self._ip_port_recorder.pop(ip_port) # remove the ip from the recorder
                self._has_vacant.notify() # notify the waiting threads to refresh the pool if condition met


class Task:
    """A single scrape job: one (asin, page, stars) combination."""

    def __init__(self, asin, page, stars) -> None:
        self.asin = asin            # target product id
        self.page = page            # review page number
        self.stars = stars          # star bucket this job belongs to
        self.success_flag = False   # flipped to True once the page is fetched

class AmazonSpider:
    """Scrape Amazon review pages for one ASIN through managed proxies.

    Each page is fetched by a dedicated ``SpiderThread`` using a proxy
    borrowed from ``ProxyManager``; a failed fetch retires the proxy and the
    page is retried (up to 3 attempts) through a fresh proxy.

    NOTE(review): the half-written producer/consumer scaffolding from the
    original draft (``_task_creator_n_``, ``_task_restarter``,
    ``_task_consumer``, ``_proxy_recycle_helper``, ``_recycle``) contained
    syntax errors and referenced attributes/methods that were never defined
    (``_spider_thd_queue``, ``_task_result``, ``withdraw_vacant_ip_port``,
    ``remove_ip``); it has been replaced by the sequential ``run`` below.
    """

    def __init__(self, asin, num_threads=5, max_parallelism_cnt=1) -> None:
        """
        Parameters
        ----------
        asin : str
            Amazon product id to crawl.
        num_threads : int
            Bound for the pending-task queue.
        max_parallelism_cnt : int
            Max concurrent borrowers per proxy endpoint (see ProxyManager).
        """
        self._asin = asin
        self._proxy_manager = ProxyManager(max_parallelism_cnt)
        # BUG FIX: the original called Queue(num_threads=5); Queue has no
        # ``num_threads`` keyword, so construction raised TypeError.
        self._task_queue = queue.Queue(num_threads)
        # Parsed review dicts collected from successful pages.
        self._results = []

    def createSpiderThread(self, asin, page, stars, ip_port, num_attempts):
        """Factory for a worker thread bound to this spider instance."""
        return AmazonSpider.SpiderThread(self, asin, page, stars, ip_port, num_attempts)

    # custom thread
    class SpiderThread(threading.Thread):
        """Worker that downloads one review page through one proxy."""

        def __init__(self, outer, asin, page, stars, ip_port, num_attempts):
            threading.Thread.__init__(self)  # execute the base constructor
            self._outer = outer               # owning AmazonSpider
            self.ip_port = ip_port            # proxy endpoint "ip:port"
            self.data = None                  # parsed review data once fetched
            self.asin = asin
            self.page = page
            self.stars = stars                # carried along; see NOTE in _get_one_page
            self.num_attempts = num_attempts  # 1-based attempt counter
            self._max_num_attempts = 3

        # function executed in a new thread
        def run(self):
            success_flag, data = self._get_one_page()
            if success_flag:
                self.data = data
            # Treat any failure as the proxy being blocked so it is retired.
            is_blocked = not success_flag
            self._outer._proxy_manager.return_one(self.ip_port, is_blocked)
            # Retry through a fresh proxy until the attempt budget is spent.
            # (BUG FIX: the original call had an empty argument slot — a
            # syntax error — and compared against a literal 3 instead of
            # self._max_num_attempts.)
            if not success_flag and self.num_attempts < self._max_num_attempts:
                self.data = self._outer._get_spider_data(
                    self.stars, self.page, self.num_attempts + 1)

        def _get_one_page(self):
            """POST the reviews-AJAX endpoint for ``self.page``.

            BUG FIX: the original signature was ``_get_one_page(asin, page,
            ip_port)`` with no ``self``, yet it was invoked as a bound method
            with two arguments — ``asin`` silently received ``self``.

            Returns
            -------
            tuple[bool, dict | None]
                (success flag, last parsed review dict or None).
            """
            proxy = {
                'http': 'http://{}'.format(self.ip_port),
                'https': 'http://{}'.format(self.ip_port)
            }

            headers = {
                'authority': 'www.amazon.com',
                "accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
                "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36",
            }

            post_data = {
                "sortBy": "recent",
                "reviewerType": "all_reviews",
                "formatType": "",
                "mediaType": "",
                "filterByStar": "",
                "filterByAge": "",
                "pageNumber": 10,
                "filterByLanguage": "",
                "filterByKeyword": "",
                "shouldAppend": "undefined",
                "deviceType": "desktop",
                "canShowIntHeader": "undefined",
                "pageSize": "10",
                "asin": "",
                "scope": "reviewsAjax0"
            }
            # Page-turning payload fields.  BUG FIX: the original assignments
            # ended with trailing commas, silently storing 1-tuples.
            post_data["pageNumber"] = self.page
            post_data["reftag"] = f"cm_cr_getr_d_paging_btm_next_{self.page}"
            post_data["scope"] = f"reviewsAjax{self.page}"
            post_data["asin"] = self.asin
            # NOTE(review): self.stars is never applied to "filterByStar", so
            # every star bucket fetches the same pages — confirm intent.
            spiderurl = f'https://www.amazon.com/hz/reviews-render/ajax/reviews/get/ref=cm_cr_arp_d_paging_btm_next_{self.page}'

            success_flag = False
            data = None
            try:
                res = requests.post(spiderurl, headers=headers, data=post_data,
                                    proxies=proxy, timeout=30)
                if res.status_code == 200:
                    body = res.content.decode('utf-8')
                    success_flag = True
                    # The AJAX response is a '&&&'-separated stream of chunks.
                    for content in body.split('&&&'):
                        infos = content.split('","')
                        info = infos[-1].replace('"]', '').replace('\\n', '').replace('\\', '')
                        # Only chunks that carry a review card are parsed.
                        if 'data-hook="review"' in info:
                            sel = Selector(text=info)
                            data = {}
                            data['username'] = sel.xpath('//span[@class="a-profile-name"]/text()').extract_first()  # reviewer name
                            data['point'] = sel.xpath('//span[@class="a-icon-alt"]/text()').extract_first()  # star rating
                            data['date'] = sel.xpath('//span[@data-hook="review-date"]/text()').extract_first()  # date/location line
                            data['review'] = sel.xpath('//span[@data-hook="review-title"]/span/text()').extract_first()  # review title
                            data['detail'] = sel.xpath('//span[@data-hook="review-body"]').extract_first()  # review body html
                            image = sel.xpath('div[@class="review-image-tile-section"]').extract_first()
                            data['image'] = image if image else "not image"  # attached image, if any
                            print(data)
            except (requests.RequestException, UnicodeDecodeError):
                print('something wrong happen!')

            return success_flag, data

    def _get_spider_data(self, stars, page, num_attempts=1):
        """Fetch one page synchronously through a borrowed proxy.

        Spawns a SpiderThread, waits for it, and returns its parsed data
        (``None`` when every attempt failed).  ``num_attempts`` now defaults
        to 1 so first-time callers need not pass it.
        """
        ip_port = self._proxy_manager.get_one()
        # BUG FIX: createSpiderThread takes (asin, page, stars, ...); the
        # original passed stars and page swapped.
        spider_thd = self.createSpiderThread(self._asin, page, stars, ip_port, num_attempts)
        spider_thd.start()
        spider_thd.join()
        return spider_thd.data

    def run(self):
        """Crawl pages 1-10 for each star rating 1-5; return collected data.

        BUG FIX: the original run() referenced undefined methods
        (``_producer``, ``_consumer``, ``withdraw_vacant_ip_port``) and
        created page threads it never started or joined.
        """
        self._proxy_manager.refresh_pool()
        for stars in range(1, 6):
            for page in range(1, 11):
                data = self._get_spider_data(stars, page)
                if data is not None:
                    self._results.append(data)
        return self._results

def func1(pm):
    """Thread target: refresh *pm*'s proxy pool once."""
    pm.refresh_pool()

def func2(pm):
    """Thread target: borrow one proxy from *pm* (result discarded)."""
    pm.get_one()


if __name__ == '__main__':
    # Manual smoke test for ProxyManager (requires live network access to
    # the proxy vendor API).
    # # B0BJLDJZPR
    # ip_port = get_one_proxy()
    # if ip_port is not None:
    #     task_one_page('B0BJLDJZPR', 4, ip_port)
    # ip_port = '43.159.18.19:28966'
    # k, c = ip_port.split(':')
    # print(k, c)
    

    # # test
    # pm = ProxyManager()
    # # pm.refresh_pool()
    # # print(pm.withdraw_vacant_ip_port())
    # q = queue.Queue(3)
    # print(q.qsize())

    # Fill the pool, then draw 4 endpoints (duplicates collapse in the set
    # if max parallelism lets an endpoint be borrowed more than once).
    pm = ProxyManager()
    pm.refresh_pool()
    s = set()
    for i in range(4):
        ip_port = pm.get_one()
        s.add(ip_port)
    print(s)
    
    # Return three endpoints flagged as blocked, exercising IP retirement.
    ip_port = s.pop()
    print(s)
    pm.return_one(ip_port, True)

    ip_port = s.pop()
    print(s)
    pm.return_one(ip_port, True)

    ip_port = s.pop()
    print(s)
    pm.return_one(ip_port, True)

    # Borrow again; should still succeed with the remaining endpoints.
    ip_port = pm.get_one()

    # task_one_page('B0BJLDJZPR', 1, ip_port)

    # Clean (non-blocked) return, then draw two more.
    pm.return_one(ip_port, False)
    ns = set()
    for i in range(2):
        ns.add(pm.get_one())    
    print(ns)
    
    # Exercise get_one blocking in one thread while another refreshes the
    # pool.  NOTE(review): thd1 is never joined — confirm that is intended.
    thd2 = threading.Thread(target=func2, args=(pm,))
    thd1 = threading.Thread(target=func1, args=(pm,))

    thd2.start()

    thd1.start()

    thd2.join()
    # lock = threading.Lock()

    # with lock:
    #     with lock:
    #         print(2)
