#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @ author = 'zoushunli'
# @ author_email = '409358374@qq.com'
import os
import urllib.parse

import requests
import random
import bs4
from multiprocessing import Pool
import scrapy
import re
import sqlite3
import xlwt


class BengBengBiliBili(object):
    """Scraping helper: fetches pages and persists parsed rows to .xls / SQLite,
    plus image downloading.

    NOTE(review): the persistence methods use job-posting columns while main()
    and download_img follow the douban movie top250 flow — confirm the intended
    record shape (dict with job keys vs. positional sequence) with the caller.
    """

    def __init__(self):
        super(BengBengBiliBili, self).__init__()
        # One User-Agent is picked per instance and reused for every request.
        self.user_agent = [
            'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.108 Safari/537.36',
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:76.0) Gecko/20100101 Firefox/76.0']
        self.headers = {'User-Agent': random.choice(self.user_agent), 'Accept': '*/*', 'Accept-Language': 'en-US,en;q=0.5'}
        # Cached callables kept as public attributes for backward compatibility.
        self.re_compile = re.compile
        self.requests_get = requests.get
        self.bs4_BeautifulSoup = bs4.BeautifulSoup
        self.xlwt_Workbook = xlwt.Workbook
        self.sqlite3_connect = sqlite3.connect
        self.session = requests.Session()
        # Default sheet/file name used by save_html_data_2_xlwt when no path is
        # given (the original read a never-assigned self._name -> AttributeError).
        self._name = type(self).__name__

    def login(self, login_url):
        """POST the login form to *login_url* and return the response.

        SECURITY(review): credentials are hard-coded in source — move them to
        configuration or environment variables.
        """
        login_data = {
            'username': '409358374@qq.com',
            'pwd': 'zou409358374'
        }
        return self.session.post(login_url, headers=self.headers, data=login_data)

    def get_cookies(self, url):
        """GET *url* on a throwaway session and print the response and its cookies."""
        session = requests.Session()
        data = session.get(url)
        print(data)
        print(data.cookies)

    def get_html_text(self, url):
        """Fetch *url* and return the decoded page text, or '' on any failure.

        The original set r.encoding but returned the raw r.content bytes, which
        made the encoding assignment a no-op; r.text honours it. The bare
        `r.status_code` expression is replaced by raise_for_status() so HTTP
        errors are reported instead of silently returning an error page.
        """
        html = ''
        try:
            r = self.requests_get(url, headers=self.headers, timeout=10, verify=False)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            html = r.text
        except Exception as e:
            print('Exception:-->', e)
            print("产生异常")
        # Plain return: the original `finally: return` swallowed every
        # exception, including KeyboardInterrupt/SystemExit.
        return html

    def prase_html_data(self, html_text, data_list):
        """TODO: parse *html_text* and append row records to *data_list*."""
        pass

    def save_html_data_2_xlwt(self, data_list, path=None):
        """Write *data_list* (dicts keyed by the job columns) to an .xls file.

        With no *path*, the sheet/file is named after the class and saved in
        the working directory; otherwise the sheet is named after the file stem
        of *path* and the workbook is saved at *path*.
        """
        if path:
            name = path.rsplit('/', 1)[-1].split('.')[0]
        else:
            name = self._name
        work_book = self.xlwt_Workbook(encoding="utf-8", style_compression=0)
        work_sheet = work_book.add_sheet(name, cell_overwrite_ok=True)
        job_title = ('job_name', 'company_name', 'salary', 'address',
                     'job_link', 'company_link', 'datatime', 'welfare', 'msg_job', 'label', 'requirement')
        for col, title in enumerate(job_title):
            work_sheet.write(0, col, title)
        for row, record in enumerate(data_list, start=1):
            for col, title in enumerate(job_title):
                work_sheet.write(row, col, record[title])
        work_book.save(path if path else name + '.xls')

    def create_table(self, table_name, db_path):
        """Create *table_name* in *db_path* with the columns insert_data uses.

        The original created douban-movie columns (link, img, title_zh, ...)
        that insert_data never references, so a fresh database could never
        accept an insert; the schema now matches insert_data's columns.
        IF NOT EXISTS makes repeated runs idempotent.
        """
        sql = '''
            create table if not exists "%s"
            (id INTEGER PRIMARY KEY AUTOINCREMENT,
            job_name text, company_name text, salary text, address text,
            job_link text, company_link text, datatime text, welfare text,
            msg_job text, label text, requirement text);
            ''' % table_name
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        try:
            conn.execute(sql)
            conn.commit()
        finally:
            conn.close()

    def insert_data(self, data_list, table_name, db_path):
        """Insert the dict rows of *data_list* into *table_name*.

        Uses parameterized SQL instead of the original manual quote-wrapping,
        which corrupted values containing quotes, allowed SQL injection, and
        mutated the caller's dicts in place.
        """
        columns = ('job_name', 'company_name', 'salary', 'address',
                   'job_link', 'company_link', 'datatime', 'welfare',
                   'msg_job', 'label', 'requirement')
        sql = 'insert into "%s" (%s) values (%s)' % (
            table_name, ', '.join(columns), ', '.join('?' * len(columns)))
        conn = self.sqlite3_connect(db_path)
        print('Opened database successfully')
        cur = conn.cursor()
        try:
            cur.executemany(sql, [tuple(row[c] for c in columns) for row in data_list])
            conn.commit()
        finally:
            cur.close()
            conn.close()
        print('成功插入')

    def save_html_data_2_sqlite(self, table_name, data_list, db_path):
        """Ensure the table exists, then insert *data_list* into it."""
        try:
            self.create_table(table_name, db_path)
        except sqlite3.Error as e:
            # was a bare `except: pass`; at least report why creation failed
            print('create_table failed:', e)
        self.insert_data(data_list, table_name, db_path)

    def download_img(self, data_list):
        """Download row[1] of each record into ./电影海报, named after row[2].

        NOTE(review): rows here are indexed as sequences, unlike the dict rows
        the xlwt/sqlite writers expect — confirm the record shape upstream.
        """
        dir_path = './电影海报'
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        for row in data_list:
            img_url = row[1].replace('"', '')  # strip quotes left on the link
            r = self.requests_get(img_url, headers=self.headers, timeout=10)
            img_ext = img_url.rsplit('/', 1)[1].split('.')[-1]
            file_path = os.path.join(dir_path, row[2] + '.' + img_ext)
            with open(file_path, 'wb') as f:
                f.write(r.content)


def main():
    """Crawl the douban movie top250 pages, then save/persist the parsed rows.

    Handy reminders (condensed from the original inline notes):
      * requests.packages.urllib3.disable_warnings() silences https warnings
      * urllib.parse.quote / unquote encode and decode %xx escapes;
        urllib.parse.urlencode turns a dict into a query string
      * requests: pass `params=` for GET query dicts, `data=` for POST bodies;
        inspect r.request.headers / r.headers / r.status_code / r.cookies
    """
    db_path = 'BengBengBiliBili.db'
    table_name = 'BengBengBiliBili'
    url = 'https://movie.douban.com/top250?start='
    demo = BengBengBiliBili()
    data_list = []

    # top250 is paginated in steps of 25 (start=0, 25, ..., 225)
    for page in range(10):
        html = demo.get_html_text(url + str(page * 25))
        demo.prase_html_data(html, data_list)

    demo.save_html_data_2_xlwt(data_list)
    demo.save_html_data_2_sqlite(table_name, data_list, db_path)
    demo.download_img(data_list)


if __name__ == "__main__":
    main()

        




