import requests
from lxml import etree
from bs4 import BeautifulSoup
from analysis.spider import common

def get_tree(url, headers, encoding=None):
    """Fetch *url* and return it parsed as an lxml HTML element tree.

    Args:
        url: Page URL to request.
        headers: Request-header dict passed to ``requests.get``.
        encoding: Optional charset override (e.g. ``'GBK'``) for sites whose
            declared/detected encoding is wrong and would otherwise decode
            to mojibake under requests' default.

    Returns:
        The lxml element tree for the page body.
    """
    response = requests.get(url, headers=headers)
    if encoding is not None:
        # Override requests' detected charset before decoding.
        response.encoding = encoding
        # BUG FIX: the original set response.encoding but then parsed
        # response.content (raw bytes), so the override had no effect.
        # Parse the decoded text so the chosen encoding is actually used.
        return etree.HTML(response.text)
    # No override: hand lxml the raw bytes and let it sniff the charset.
    return etree.HTML(response.content)

# Fetch a page via header_requests_get and search its raw source for strings.
def find_in_source_requests(url, header_txt, find_list, encoding=None):
    """GET *url* (headers read from *header_txt*) and print, for every
    string in *find_list*, whether it occurs in the page source."""
    resp = header_requests_get(header_txt, url, encoding=encoding)
    print('返回值：{}'.format(resp))
    source = resp.text

    for needle in find_list:
        if needle in source:
            print('{}在源码中已找到'.format(needle))
        else:
            print('源码未找到：{}'.format(needle))

# Build headers from a copied request-header text file, perform a GET,
# and optionally save the response body to '<filename>.txt'.
def header_requests_get(header_filename, url, encoding=None, save=False, filename=None):
    """GET *url* using headers parsed from a pasted-header text file.

    Args:
        header_filename: Path of the text file holding the copied request
            header (paste it into e.g. 'headers.txt'); parsed by
            ``common.header_trans``.
        url: URL to request.
        encoding: Optional charset override applied to the response.
        save: When True, write the raw response body to ``filename + '.txt'``.
        filename: Base name (without extension) for the saved file;
            required when *save* is True.

    Returns:
        The ``requests.Response`` object.

    Raises:
        ValueError: If *save* is True but *filename* is None (previously
            this crashed with an opaque ``TypeError`` on ``None + '.txt'``).
    """
    headers = common.header_trans(header_filename)
    r = requests.get(url, headers=headers)

    if encoding is not None:
        r.encoding = encoding

    if save:
        if filename is None:
            raise ValueError("filename is required when save=True")
        # 'wb' suffices for a fresh write; the original 'wb+' read mode
        # was never used.
        with open(filename + '.txt', 'wb') as f:
            f.write(r.content)
    return r

    # Save the page source to a file

def BF4_cutcontent(re_get):
    """Parse a requests response body into a BeautifulSoup tree.

    Args:
        re_get: A ``requests.Response``-like object exposing ``.text``.

    Returns:
        A ``BeautifulSoup`` object built with the ``lxml`` parser.
    """
    return BeautifulSoup(re_get.text, "lxml")