# -*- coding: UTF-8 -*-
# Author: Damon(wuud1)
# CreateDate: 2018-08-01
# Message: 泰明顿网站数据抓取,通过pid查询, 网站看起来复杂,实则简单,几个看起来的加密参数都没有用,分析网站结构用时较多,写起来较快
# 断点续爬功能,重置断点续爬功能,重新运行只需要更改main函数里的参数即可,比较通用

import requests
import redis
import os
import sys
import hashlib
import pymysql
import json
from lxml import etree
import time
from collections import OrderedDict


# Python 2 encoding hack: reload(sys) restores setdefaultencoding (site.py
# deletes it at startup) so implicit str<->unicode conversions use UTF-8
# instead of ASCII. Python 2 only; has no equivalent (and no need) in Python 3.
reload(sys)
sys.setdefaultencoding('utf-8')

class TextarForPid(object):
    """Scraper for the Textar brake-pad catalogue (textar.brakebook.com).

    Reads part ids (pids) from a MySQL table, POSTs each one to the site's
    application-search form, follows the result links to datasheet pages,
    extracts the OE cross-reference table and inserts one row per OE entry
    into a second MySQL table.

    Two persistence mechanisms make the crawl restartable:
      * every HTTP response is cached on disk under an MD5 of the request,
        so re-runs replay from local files instead of re-fetching;
      * processed pids are tracked in a Redis set ('textar_pid'), so an
        interrupted run resumes where it left off (see r_flushall to reset).

    NOTE(review): Python 2 only (print statements, reload(sys) at module
    level).
    """

    def __init__(self, **kwargs):
        """Store configuration; real connections are opened lazily in run().

        Expected kwargs: db_config (dict for pymysql.connect), select_table
        (table pids are read from), insert_table (table results go to).
        """
        self.db_config = kwargs.get("db_config")  # MySQL connection settings
        self.select_table = kwargs.get("select_table")  # source table (pid, brand)
        self.insert_table = kwargs.get("insert_table")  # destination table for scraped rows
        self.conn1 = None  # two separate connections: conn1/curs1 stream the
        self.conn2 = None  # pid list, conn2/curs2 perform the inserts
        self.curs1 = None
        self.curs2 = None
        self.base_path = '/Users/duanmengdi/Desktop/workspace/textar_html/'  # directory for cached html responses
        self.location = ""  # last redirect Location header seen (set by parse_url)
        self.total_num = 0  # running count of rows inserted this run
        self.r = redis.Redis(host='localhost', port=6379, db=0)  # redis connection: pid dedup / resume checkpoint

        self.search_url = 'https://textar.brakebook.com/bb/public/applicationSearch.xhtml'
        self.base_url = 'https://textar.brakebook.com'

        # Session cookies captured from a browser; parse_url merges any
        # Set-Cookie values from responses back into this dict.
        self.cookies = {
            'JSESSIONID': '80F603320C18B7A6BB7796FD189A83AC',
            'locale': 'en_CN',
            '_pk_ses.1.e52e': '*',
            '_pk_id.1.e52e': '3027e45e6444b483.1533018251.3.1533103758.1533096063.',
        }

        self.headers = {
            'Host': 'textar.brakebook.com',
            'Origin': 'https://textar.brakebook.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
            'Referer': 'https://textar.brakebook.com/bb/textar/en_CN/applicationSearch.xhtml',
        }

        # Form fields for the search POST. OrderedDict so json.dumps produces
        # a stable string for the disk-cache key in parse_url.
        self.search_post_data = OrderedDict()
        self.search_post_data['[configId]'] = 'textar'
        self.search_post_data['locale'] = 'en_CN'
        self.search_post_data['[ds]'] = 'LG16'
        self.search_post_data['search_keywords'] = ''  # filled with the pid being searched
        self.search_post_data['org.apache.myfaces.trinidad.faces.FORM'] = 'searchByKeywordsForm'
        self.search_post_data['_noJavaScript'] = 'false'
        self.search_post_data['source'] = 'j_id581'  # server-generated JSF id; may change between deployments

    def getmd5(self, str):
        """Return the hex MD5 digest of *str* (used as the cache-file key).

        NOTE(review): the parameter name shadows the builtin ``str``.
        """
        md5 = hashlib.md5()
        md5.update(str)
        return md5.hexdigest()

    def get_conn(self, dbconfig_dict):
        """Open and return a new pymysql connection from a config dict."""
        conn = pymysql.connect(**dbconfig_dict)
        return conn

    def get_cursor(self, conn, type='stream'):
        """Return a cursor of the requested flavour for *conn*.

        'stream' -> SSCursor, 'dict' -> DictCursor; anything else raises.
        NOTE(review): the parameter name shadows the builtin ``type``.
        """
        if type == 'stream':
            return conn.cursor(pymysql.cursors.SSCursor)  # streaming cursor: large result sets are not held in memory (rows are tuples)
        elif type == 'dict':
            return conn.cursor(pymysql.cursors.DictCursor)  # rows come back as dicts
        else:
            raise Exception("cursor type error")

    def parse_url(self, url, method, post_data=None, headers=None, tofile=True, **kwargs):
        """Fetch *url* via GET or POST with a transparent on-disk cache.

        The cache key is md5(method + url + pid-or-postdata); a hit returns
        the saved html text, a miss performs the request (one silent retry),
        merges response cookies into self.cookies, records any redirect
        Location into self.location, and optionally writes the body to disk.

        Returns (response_text_or_None, cache_file_path).
        Raises UnboundLocalError if *method* is neither 'GET' nor 'POST'
        (file_path is never assigned) — callers only pass those two values.
        """
        requests.packages.urllib3.disable_warnings()  # SSL verification is off (verify=False), so silence the insecure-request warnings
        pid = kwargs.get("local_pid", "")
        charset = kwargs.get("charset", "utf8")  # currently unused
        allow_redirects = kwargs.get("allow_redirects", False)  # default False: keep the raw 3xx response so Location can be read
        if method == 'GET':
            data = "GET" + url + pid  # cache-key material
            md5 = self.getmd5(data)  # request fingerprint
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # cache hit: read the saved html
                print "GET for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:  # cache miss: fetch from the web
                print "GET for web"
                try:
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)
                except Exception as e:
                    # single blind retry; NOTE(review): the first error is
                    # discarded and a second failure propagates uncaught
                    response = requests.get(url=url, verify=False, headers=headers, cookies=self.cookies, timeout=30,
                                            allow_redirects=allow_redirects)

                # merge response cookies into the shared cookie jar
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # remember a redirect target if the server sent one
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # return response.content.decode('utf8')

                # requests.Response is falsy for 4xx/5xx status codes, so
                # error responses fall through to None here
                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # save the body into the cache
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")
                else:
                    response = None

        elif method == "POST":
            data = "POST" + url + json.dumps(post_data)  # cache key includes the form payload
            md5 = self.getmd5(data)
            file_path = self.base_path + md5 + ".html"
            if os.path.exists(file_path):  # cache hit: read the saved html
                print "POST for local"
                with open(file_path, 'r') as f:
                    res = f.read()
                response = res
            else:
                print "POST for web"
                try:
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)
                except Exception as e:
                    # single blind retry, same caveat as the GET branch
                    response = requests.post(url=url, data=post_data, verify=False, headers=headers,
                                             cookies=self.cookies, timeout=30,
                                             allow_redirects=allow_redirects)

                # merge response cookies into the shared cookie jar
                cookies = response.cookies.get_dict()
                for k in cookies:
                    self.cookies[k] = cookies[k]

                # remember a redirect target if the server sent one
                location = response.headers.get("Location", None)
                if location:
                    print "响应头中存在location"
                    self.location = location
                    # response, file_path = self.parse_url(url=location, method="GET", headers=headers, local_pid=pid)
                    # print "已重新请求location地址"
                    # return response, file_path
                    # return response.content.decode('utf8')

                if response:
                    if tofile:
                        with open(file_path, 'w') as f:  # save the body into the cache
                            f.write(response.content.decode('utf8'))
                    response = response.content.decode("utf8")

                else:
                    response = None

        return response, file_path

    def save2db(self, data_dict, curs):
        """Insert one scraped row into self.insert_table via *curs*.

        On any execute() error the sql is printed and the whole process
        exits (the Redis checkpoint makes the run resumable).
        """
        # pull the fields out of the dict
        pid = data_dict['pid'].decode('utf8')
        brand_name = data_dict['brand_name'].decode('utf8')
        part_brand = data_dict['part_brand'].decode('utf8')
        part_num = data_dict['part_num'].decode('utf8')
        part_name = data_dict['part_name'].decode('utf8')
        brand_name1 = data_dict['brand_name1'].decode('utf8')
        pid1 = data_dict['pid1'].decode('utf8')
        status = data_dict['status'].decode('utf8')

        # build the insert statement
        # NOTE(review): values are %-interpolated into the SQL string, not
        # parameterized — a quote in any field breaks the statement and this
        # is SQL-injectable; prefer curs.execute(sql, params)
        sql = '''INSERT INTO `{}` (`pid`, `brand_name`, `part_brand`, `part_num`, `part_name`, `brand_name1`, `pid1`, `status`) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')'''.format(self.insert_table) % (
        pid, brand_name, part_brand, part_num, part_name, brand_name1, pid1, status)
        # print sql
        try:
            curs.execute(sql)
        except Exception as e:
            print e
            print sql
            exit()
        self.total_num += 1  # inserted-row counter

    def r_flushall(self):
        """Reset the resume checkpoint: drop the pid set and persist Redis."""
        self.r.delete('textar_pid')
        self.r.save()

    def run(self):
        """Main crawl loop: stream pids from MySQL, scrape, insert results.

        Commits the insert connection every 100 rows and once at the end;
        raises on missing search/detail responses so the process stops
        (re-running resumes via the Redis pid set).
        """
        # open the DB connections and cursors
        self.conn1 = self.get_conn(dbconfig_dict=self.db_config)
        self.conn2 = self.get_conn(dbconfig_dict=self.db_config)
        self.curs1 = self.get_cursor(conn=self.conn1)
        self.curs2 = self.get_cursor(conn=self.conn2)
        select_sql = '''select pid,brand from {}'''.format(self.select_table)

        self.curs1.execute(select_sql)

        for data in self.curs1:
            local_pid = data[0]
            # local_pid = '3D0615601C'  # test pid, has results
            # local_pid = '4H0615121M'  # test pid, no results

            # resume support: skip pids already processed in a previous run
            if self.r.sismember('textar_pid', local_pid):
                continue
            self.r.sadd('textar_pid', local_pid)  # mark the pid as seen (dedup + checkpoint)

            brand_name = data[1]
            # time.sleep(1)  # throttle requests

            self.search_post_data['search_keywords'] = local_pid
            # try:
            # the search POST redirects; allow_redirects=True so we get the final page
            search_result_response, search_result_file_path = self.parse_url(url=self.search_url, method="POST", post_data=self.search_post_data, headers=self.headers, local_pid=local_pid, allow_redirects=True)
            if search_result_response:
                search_result_html = etree.HTML(bytes(search_result_response))
            else:
                print "没有获取到搜索结果页响应", local_pid, search_result_file_path
                raise Exception("没有获取到搜索结果页响应")

            detail_url_list = search_result_html.xpath('//a[@class="datasheetLink"]')
            if len(detail_url_list) <= 0:
                print "没有找到详情页url列表, 跳过", local_pid, search_result_file_path
                continue

            for detail_url in detail_url_list:
                detail_url = detail_url.xpath('./@href')
                detail_url = self.base_url+detail_url[0] if len(detail_url)>0 else ""
                if detail_url == "":
                    print "没有找到详情页url, 跳过", local_pid, search_result_file_path
                print detail_url

                detail_response, detail_file_path = self.parse_url(url=detail_url, method="GET", headers=self.headers, local_pid=local_pid, allow_redirects=True)
                if detail_response:
                    detail_html = etree.HTML(bytes(detail_response))
                else:
                    print "没有获取详情页响应", local_pid, detail_file_path
                    raise Exception("没有获取详情页响应")

                part_name = detail_html.xpath('//div[contains(text(), "Article number")]/../div[@class="nameCell_name"]/text()')
                part_name = part_name[0].replace(' ', '').replace('\n', '').replace('\t', '') if len(part_name)>0 else ""

                part_num = detail_html.xpath('//div[contains(text(), "Article number")]/text()')
                part_num = part_num[0].replace(' ', '').replace('\n', '').replace('\t', '').split(':')[1] if len(part_num)>0 else ""

                # rows of the OE cross-reference table (first row is the header)
                oe_tr_list = detail_html.xpath('//span[text()="Manufacturer"]/../../../tr')
                if len(oe_tr_list) <= 0:
                    print "没有找到oe对照列表, 跳过", local_pid, detail_file_path

                oe_tr_list.pop(0)
                for oe_tr in oe_tr_list:
                    brand_name1 = oe_tr.xpath('./td[1]/div/text()')
                    pid1 = oe_tr.xpath('./td[2]/div/div/text()')
                    brand_name1 = brand_name1[0].replace(' ', '').replace('\n', '').replace('\t', '') if len(brand_name1)>0 else ""
                    pid1 = pid1[0].replace(' ', '').replace('\n', '').replace('\t', '') if len(pid1)>0 else ""

                    # assemble the final record
                    last_data = dict(
                        pid=local_pid,
                        brand_name=brand_name,
                        part_brand='textar',
                        part_num=part_num,
                        part_name=part_name,
                        brand_name1=brand_name1,
                        pid1=pid1,
                        status=''
                    )

                    self.save2db(data_dict=last_data, curs=self.curs2)
                    print last_data

                    # persist the Redis checkpoint
                    self.r.save()

                    if self.total_num % 100 == 0:
                        self.conn2.commit()
                        print "提交insert缓存, 当前数量: {}".format(self.total_num)
            # except Exception as e:
            #     self.conn2.commit()
            #     print "爬虫遇到故障,已提交insert缓存"
            #     raise e

        self.conn2.commit()
        print "最终提交, 数量: {}".format(self.total_num)

        self.curs1.close()
        self.curs2.close()
        self.conn1.close()
        self.conn2.close()

if __name__ == '__main__':
    # MySQL settings shared by the pid-source and result-target connections.
    mysql_conf = {
        "host": "127.0.0.1",
        "port": 3306,
        "user": "root",
        "passwd": "123456",
        "db": "my_data",
        "charset": "utf8",
    }

    crawler = TextarForPid(
        db_config=mysql_conf,
        select_table='0731_pid_2',
        insert_table='textar_all',
    )
    # crawler.r_flushall()  # uncomment to reset the resume checkpoint
    crawler.run()
