#!/usr/bin/env python
# encoding: utf-8
'''
  @author: HJW
  @license: (C) Copyright 2013-2017, Node Supply Chain Manager Corporation Limited.
  @contact: hejunwang01@126.com
  @file: myspider.py
  @time: 2022/8/8 0008 下午 23:54
  @desc:
  '''
import os.path
from flask import Blueprint,request,jsonify
import logging
import json
import requests
# Flask blueprint registration; url_prefix prepends /spider to every route
# defined on this blueprint (NOTE: the routes below are currently commented out).
bp = Blueprint('spider',__name__,url_prefix='/spider')
# Module-level logger, named after this module per logging convention.
logger = logging.getLogger(__name__)


# @bp.route('/v1/myspider',methods=['GET'])
# @bp.route('/v1/myspider',methods=['GET'])
def myspider():
    """Query the Baidu Translate suggestion API for "dog" and persist the reply.

    POSTs form data to https://fanyi.baidu.com/sug (the endpoint answers with
    Content-Type: application/json) and writes the decoded JSON to
    ./myfile.txt as UTF-8, keeping non-ASCII characters readable.

    :return: None; side effect is the file write and a progress print.
    """
    # 1. Target endpoint.
    url = "https://fanyi.baidu.com/sug"
    # 2. Browser-like User-Agent so the request is not rejected as a bot.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
    }
    data = {
        "kw": "dog"
    }
    # 3. Send the request; timeout prevents hanging forever on a dead server.
    response = requests.post(url=url, data=data, headers=headers, timeout=10)
    res = response.json()
    # 4. Persist the data. BUG FIX: the file was opened without `with` and
    # never closed, leaking the handle; the context manager guarantees close.
    with open('./myfile.txt', 'w', encoding="utf-8") as fp:
        json.dump(res, fp=fp, ensure_ascii=False)
    print("myspider over!!!")



def yaojianju():
    """Query the NMPA (China drug regulator) data-search API and print the JSON reply.

    The two opaque key/value pairs below are anti-crawler tokens captured from
    a browser session; presumably short-lived — TODO confirm they still work.

    :return: None; side effect is printing the decoded JSON.
    """
    # Endpoint; the trailing "?" shows the tokens are meant as a query string.
    url = "https://www.nmpa.gov.cn/datasearch/data/nmpadata/search?"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
    }
    params = {
        "hKHnQfLv":"5cZgCPKuEhZnyVY845BILUwAMFf.vrw9gAhaqMquJ9purPkb.Q8ctZoyV92aaTlFmpKOtozTnZquBET_J2s11dlY.rPJhagyps22h.da.UD3T9VvXLJiSqgx_j9v.6PLeCKJRD4sgCsKWvJ0PoLN8tiBH53XhvlafUc24fB6YGJ88OJoRJJZGc34oB6BY0zQNDLiYAHYoUEZzvoxe5q.bB_GG8JR596LrXxpPfwUqpHIeO8tlgKOTe0VaiLE8yLnSsAdI5Qqp7OGfSE49p4ZMskBEWpwtKvTkNicuqfMOCwAhjX.MTtUp7Opa6ZDpunuL442fPx4vSEBM1QxX0zhmLq",
        "8X7Yi61c": "4Fq9G5yfvBeZSqmXcgXOn2XwFQqM5v6A_2tModSH_Sb7FhmmZfMMGiK4lpvAJzXYWmab6tNzrWfotf1kONZtNfHA15Aocn2zfMFGZT6EhdP9D6gvM_GUM9YBcobBiSQNzDKsPDdkN6kwzQSG2zb1oSi37IgpETNUMKVyXJBRaJxAaElHZ_skVY0PI2Cx2NzvZpuxBYP5Vb5ip0x25I72eAvqBimi8Mwcqqrv8z5ykjyTGPgHyWH8qKyVIMPEppZ1YCLFMQJp1EG3UrhKWeA83Gea2YTNZjpf8hWuoeSSike0"
    }

    # BUG FIX: requests.get(data=...) puts the tokens into a GET request body,
    # which the server ignores; send them as query parameters instead.
    response = requests.get(url=url, headers=headers, params=params, timeout=10)
    res = response.json()
    print(res)


from lxml import  etree
def usexpath():
    """Scrape the pic.netbian.com 4K gallery list page and download each image.

    XPath cheat sheet (kept from the original notes):
    - /   : root node
    - //  : any depth below the current node
    - attribute locate : //div[@class="xsdf"]
    - index locate     : //div[@class="xsdf"]/p[3]  (indexes are 1-based)
    - take text:
        - /text()   direct text of the tag
        - //text()  all descendant text
    - take attribute:
        - /@attname   e.g. img/@src

    :return: None; images are saved under ./meitu/
    """
    url = "https://pic.netbian.com/4kmeinv/"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
    }
    response = requests.get(url=url, headers=headers, timeout=10)
    # The site serves GBK; requests guesses iso-8859-1, and the resulting
    # mojibake in filenames is round-tripped back to GBK per item below.
    page_text = response.text

    # Parse the HTML into an element tree.
    tree = etree.HTML(page_text)
    # Attribute locate: one <li> per picture card.
    li_list = tree.xpath('//*[@id="main"]/div[3]/ul/li')

    # FIX: race-free directory creation instead of exists()+mkdir.
    os.makedirs('./meitu', exist_ok=True)

    for li in li_list:
        # `.` makes the XPath relative to this <li>.
        srcs = li.xpath('./a/img/@src')
        alts = li.xpath('./a/img/@alt')
        if not srcs or not alts:
            # BUG FIX: skip cards without an <img> instead of raising IndexError.
            continue
        jpgurl = "https://pic.netbian.com" + srcs[0]
        imgname = alts[0] + ".jpg"
        # The page is GBK but was decoded as iso-8859-1; re-encode/decode to
        # recover the real characters (common fix for this mojibake pattern).
        imgname = imgname.encode('iso-8859-1').decode('gbk')
        print(imgname, jpgurl)

        # Fetch the image bytes and write them out.
        img_data = requests.get(url=jpgurl, headers=headers, timeout=10).content
        img_path = 'meitu/' + imgname
        with open(img_path, 'wb') as f:
            f.write(img_data)
            print(imgname, "下载完成 !")


def street():
    """Crawl the NBS administrative-division index (snapshot: 2021-10-31) and
    print province (level-1) and city (level-2) names with their page URLs.

    :return: None; output goes to stdout only.
    """
    # Base URL hoisted: both levels of links are relative to it.
    base = "http://www.stats.gov.cn/tjsj/tjbz/tjyqhdmhcxhfdm/2021/"
    url = base + "index.html"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
    }
    responese = requests.get(url=url, headers=headers, timeout=10)
    # Force UTF-8 so the Chinese names decode correctly.
    responese.encoding = "utf-8"
    tree = etree.HTML(responese.text)

    # Level 1: province cells.
    td_list = tree.xpath('//tr[@class ="provincetr"]/td')
    for td in td_list:
        names = td.xpath('./a/text()')
        hrefs = td.xpath('./a/@href')
        if not names or not hrefs:
            # BUG FIX: some <td> are empty fillers; skip instead of IndexError.
            continue
        cityname = names[0]
        yjurl = base + hrefs[0]
        print("一级城市:%s,一级地址:%s" % (cityname, yjurl))

        # Level 2: city rows inside the province page.
        res2 = requests.get(url=yjurl, headers=headers, timeout=10)
        res2.encoding = "utf-8"
        tree2 = etree.HTML(res2.text)
        for ls in tree2.xpath('//tr[@class="citytr"]/td'):
            texts = ls.xpath('./a/text()')
            links = ls.xpath('./a/@href')
            if not texts or not links:
                continue
            # BUG FIX: the original printed the raw xpath() list; take the value.
            code = texts[0]
            erji = base + links[0]
            print("二级城市:%s,二级地址:%s" % (code, erji))


# Free resume-template downloader — fetches the first 10 listing pages.
def jianlixaizai():
    """Download free resume templates (.rar) from sc.chinaz.com, pages 1-10.

    Files are saved under ./meitu/ (directory name kept for compatibility with
    the other scrapers in this module).

    :return: None; side effects are the downloaded files and progress prints.
    """
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/97.0.4692.71 Safari/537.36"
    }

    # FIX: create the output directory once, outside the page loop.
    os.makedirs('./meitu', exist_ok=True)

    # FIX: iterate 1..10 directly instead of range(0, 10) plus `i += 1`.
    for page in range(1, 11):
        # Page 1 has a different URL from the numbered pages.
        if page == 1:
            url = "https://sc.chinaz.com/jianli/free.html"
        else:
            url = "https://sc.chinaz.com/jianli/free_{}.html".format(page)

        print('url--->', url)
        responese = requests.get(url=url, headers=headers, timeout=10)
        responese.encoding = 'utf-8'
        tree = etree.HTML(responese.text)

        # One <div> per template card inside #container.
        for dv in tree.xpath('//*[@id="container"]/div'):
            hrefs = dv.xpath('./p/a/@href')
            alts = dv.xpath('./a/img/@alt')
            if not hrefs or not alts:
                # BUG FIX: skip malformed cards instead of raising IndexError.
                continue
            dvurl = "https:" + hrefs[0]
            dvname = alts[0] + ".rar"
            print(dvname, dvurl)

            # Follow the detail page to find the real download link.
            res2 = requests.get(url=dvurl, headers=headers, timeout=10).text
            tree2 = etree.HTML(res2)
            downurls = tree2.xpath('//*[@id="down"]/div[2]/ul/li/a/@href')
            if not downurls:
                # BUG FIX: guard the [0] index on the download-link lookup too.
                continue
            downurl = downurls[0]
            print(downurl)

            img_data = requests.get(url=downurl, headers=headers, timeout=10).content
            img_path = 'meitu/' + dvname
            with open(img_path, 'wb') as f:
                f.write(img_data)
                print(dvname, "下载完成 !")


def damalogin():
    """Log in to the jfbym.com captcha-service portal and print the response body.

    NOTE(review): credentials are hard-coded below — move them to environment
    variables or config before real use; they are also committed in plaintext.

    :return: None; side effect is printing the raw response HTML.
    """
    url = "https://zhuce.jfbym.com/index/member/login.html"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36"
    }
    data = {
        "phone": "13726412604",
        "password": "123456"
    }

    # A Session keeps the login cookies for any follow-up requests.
    session = requests.session()
    # FIX: timeout added so a dead server cannot hang the login forever.
    responese = session.post(url=url, headers=headers, data=data, timeout=10)
    page_next = responese.text

    print(page_next)

    # el = tree.xpath("/html/body/section/div/div/div[2]/div[1]/div[1]//text()")
    # print(el)








if __name__ == '__main__':
    # Manual smoke-test entry point: uncomment exactly one scraper to run it.
    # myspider()
    # yaojianju()
    # usexpath()
    # street()
    # jianlixaizai()
    damalogin()
    # yumalogin()  NOTE(review): not defined in this file — verify it exists elsewhere