from rest_framework.response import Response
from rest_framework.views import APIView
import sys
from webapi.tools  import * # 引入脚手架php适配工具库

class generalSearch( APIView ):
    """Aggregated product search endpoint (Amazon / AliExpress / eBay).

    NOTE(review): this view is in a half-finished debugging state.  It
    short-circuits with ``return Response({"status": True})`` right after
    the keyword check, so all of the proxy/scraping code below that point
    is currently dead.  That dead code is kept here (with its obvious
    bugs fixed) so development can resume where it left off.
    """

    def post(self, request):
        """Handle ``POST {"inputText": <search keywords>}``.

        Returns ``{"status": False, "message": ...}`` when no keyword is
        supplied, otherwise ``{"status": True}`` (debug short-circuit).
        """
        import requests
        from bs4 import BeautifulSoup
        import re
        import time

        # json_decode / empty / proxy_test come from the wildcard import
        # of webapi.tools at the top of the file (PHP-style scaffolding
        # helpers).
        data = json_decode(request.body.decode())
        inputText = data.get('inputText')

        amazon_goods_list = []
        aliexpress_goods_list = []
        ebay_goods_list = []
        list_to_insert = []

        # Seconds that requests will wait for a response before aborting.
        timeout = 10
        count = 0

        # Number of proxy-IP fetch attempts so far; abort after 10.
        ip_get_count = 0

        # Custom request headers: spoofed user agent, keep-alive, etc.
        # (A very large commented-out Cookie/referer block used to live
        # here; recover it from version control history if needed.)
        amazon_headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
            'Connection':
            'keep-alive',
        }

        # Keyword(s) from the submitted search input.
        keywords = inputText

        if empty(keywords):
            return Response({"status": False, 'message':'请输入长尾词内容'})

        print(request.session['ADMIN_SESS'])
        # NOTE(review): debug short-circuit — everything below this
        # return is dead code until it is removed.
        return Response({"status": True})

        auto_get_proxies = ''  # proxy URL obtained automatically below

        # Loop until a working proxy IP is obtained (at most 10 attempts).
        while True:
            ip_get_count += 1
            # Option 1 (free): scylla forward proxy.  Requires the scylla
            # project running locally in HTTP forward-proxy mode, serving
            # the proxy IPs it scraped automatically.
            # proxies_ip = requests.get('http://api.ipify.org', proxies={'http': 'http://127.0.0.1:8081'})

            # Option 2 (paid): fetch a highly-available proxy IP from an
            # API, then verify that it is usable.
            proxies_ip = requests.get('http://api.ipify.org', proxies={'http': 'http://127.0.0.1:8081'})

            port = 80
            print('获取到ip:' + proxies_ip.text)

            # Verify the fetched proxy IP actually works before using it.
            if proxy_test(proxies_ip.text, False, port):
                # BUGFIX: the original concatenated the Response object
                # itself (not .text) with an int port, and a trailing
                # comma turned the result into a 1-tuple.
                auto_get_proxies = "http://" + proxies_ip.text + ':' + str(port)
                break
            if ip_get_count == 10:
                print('超过限定次数次数，代理ip获取失败。')
                break

        # Proxy configuration used by the scraping requests below.
        proxies = {
            'http': auto_get_proxies,
            #   'https': 'https://43.230.193.117:34698'
        }

        # NOTE(review): sys.exit() inside a Django view kills the whole
        # worker process — replace with a proper response before
        # re-enabling this code path.
        sys.exit('________成功___________')

        # (A large commented-out Amazon scraping implementation — list
        # page fetch, BeautifulSoup parsing, per-item detail scraping and
        # AmazonSearch bulk insert — used to live here; recover it from
        # version control history if needed.)

        # Spoofed headers for AliExpress requests.
        # BUGFIX: a stray, *uncommented* fragment of a pasted cookie
        # string used to sit inside this dict literal, which made the
        # whole module a SyntaxError.
        aliexpress_headers = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36',
            'Connection':
            'keep-alive',
            'referer':
            'https://www.aliexpress.com'
        }

        # Scrape the AliExpress product listing for the keywords.
        aliexpress_list_html = requests.get(
            'http://www.aliexpress.com/wholesale?page=1&g=y&SearchText=' + keywords,
            timeout=timeout,
            headers=aliexpress_headers,
            proxies=proxies,
        )

        # Raw HTML of the listing page.
        aliexpress_html_doc = aliexpress_list_html.text

        # BeautifulSoup supports the stdlib parser plus third-party ones;
        # lxml is recommended, html.parser as a fallback.
        soup = BeautifulSoup(aliexpress_html_doc, "lxml")

        # Page title, for debugging.
        print(soup.title)
        print(aliexpress_html_doc)

        # All product sections on the listing page.
        # BUGFIX: BeautifulSoup's keyword for CSS-class filtering is
        # ``class_``; the original ``_class`` was treated as a filter on
        # a literal "_class" attribute and silently matched nothing.
        div_all = soup.find_all(
            name="div",
            class_="util-clearfix temp-height lazy-load list-items",
        )

        print(div_all)
        # NOTE(review): sys.exit() here would also kill the worker
        # process — remove before re-enabling this code path.
        sys.exit(0)
        return Response({"status": True, 'list':amazon_goods_list, 'total_num':len(amazon_goods_list)})