# @Time:2018.7.20
# @Author:menxging

import pymysql
import re
import json
import requests
from bs4 import BeautifulSoup
from lxml import etree
from core500_words import CORE500WORDS
from utils.OxfordAPI import OxfordApi
from selenium import webdriver
import re


class SpiderIciba(object):
    """
    Scrape word data from http://www.iciba.com (with http://dict.youdao.com
    as a secondary source and a Selenium-driven Chrome for JS-rendered parts).

    Target tables / fields:
      word:       spelling pron_en pron_en_audio pron_us pron_us_audio _root
                  prefix suffix _synonym _antonym
      definition: pos (part of speech), content (meaning), examples
      phrase:     text_cn text_en analysis examples
      sentence:   text_cn text_en audio analysis
    """

    def __init__(self):
        """
        Initialise spider parameters: base URLs, request headers, a progress
        counter and a real Chrome session (some iciba content is rendered by
        JavaScript and is invisible to plain ``requests``).
        """
        self.base_url_1 = 'http://www.iciba.com/'
        self.base_url_2 = 'http://dict.youdao.com/search?q='
        self.headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko"}
        self.count = 1  # progress counter, incremented once per word in start_spider()
        self.driver = webdriver.Chrome()

    def run_spider(self, word):
        """
        Fetch the raw pages for *word*.

        :param word: the word to look up
        :return: ``[iciba_page_bytes, youdao_page_bytes]``
        """
        # Full URLs for the two dictionary sites.
        url_1 = self.base_url_1 + word
        url_2 = self.base_url_2 + word
        response_1 = requests.get(url=url_1, headers=self.headers)
        response_2 = requests.get(url=url_2, headers=self.headers)

        # Also load the iciba page in the browser so parse_data() can read
        # the JS-rendered root/prefix/suffix header through self.driver.
        self.driver.get(url_1)
        return [response_1.content, response_2.content]

    def parse_data(self, response, word):
        """
        Parse the fetched pages and extract the target fields.

        :param response: ``[iciba_page_bytes, youdao_page_bytes]`` from run_spider()
        :param word: the word currently being processed
        :return: None for now — the assembled data_dict is still disabled below
        :raises AttributeError/IndexError: when the page layout changed and a
                regex/selector finds nothing
        """
        # BeautifulSoup for the iciba page, lxml trees for xpath queries.
        soup = BeautifulSoup(response[0], 'lxml')
        html_1 = etree.HTML(response[0])  # iciba tree (kept for xpath experiments below)
        html = etree.HTML(response[1])    # youdao tree

        # 1) spelling
        spelling = soup.select('h1[class="keyword"]')[0].get_text().strip()
        print("---" * 20)
        print("当前是第%s个:" % self.count)
        print("拼写：%s" % spelling)

        # 2) British phonetic transcription, e.g. "[...]"
        pron_en = soup.select('.base-speak span span')[0].get_text()
        pron_en = re.search(r'\[.*\]', pron_en).group()
        print('英式音标为：%s' % pron_en)

        # 3) American phonetic transcription
        pron_us = soup.select('.base-speak span span')[1].get_text()
        pron_us = re.search(r'\[.*\]', pron_us).group()
        print('美式音标为：%s' % pron_us)

        # 4) pron_en_audio: URL of the British pronunciation .mp3,
        #    pulled out of the raw tag markup with a regex.
        tag_str = str(soup.select('.base-speak .new-speak-step')[0])
        pron_en_audio = re.search(r'http://.*?\.mp3', tag_str).group()
        print('英式音标发音为：%s' % pron_en_audio)

        # 5) pron_us_audio: URL of the American pronunciation .mp3
        tag_str = str(soup.select('.base-speak .new-speak-step')[1])
        pron_us_audio = re.search(r'http://.*?\.mp3', tag_str).group()
        print('美式音标发音为：%s' % pron_us_audio)

        # 6) _root: word root, taken from the youdao page
        # _root = OxfordApi().get__root(word)  # alternative: Oxford API
        _root = html.xpath('//*[@id="relWordTab"]/p[1]/span/a/text()')[0]
        print('词根为：%s' % _root)

        # The root/prefix/suffix header is JS-rendered, so it is only visible
        # through the live browser session; skip silently when absent.
        data = ''
        try:
            data = self.driver.find_element_by_xpath(
                '/html/body/div[4]/div[6]/div[2]/div[2]/div[5]/div/div/div/h1').text
        except Exception:  # element missing for this word — best effort
            pass
        # Decide whether the scraped header describes a root (词根),
        # a prefix (前缀) or a suffix (后缀).
        if re.match(r'词根', data):
            print('%s' % data)
        elif re.match(r'前缀', data):
            # 7) prefix
            prefix = data
            print('%s' % prefix)
        elif re.match(r'后缀', data):
            # 8) suffix
            suffix = data
            print('%s' % data)

        # Synonyms/antonyms come from the Oxford API; the lookup is best
        # effort and the whole step is skipped on any failure.
        try:
            _synonym, _antonym = OxfordApi().get__synonym__antonym(word)
            # 9) synonyms
            print('同义词：%s' % _synonym)
            # 10) antonyms
            print('反义词：%s' % _antonym)
        except Exception:
            pass

        # 11)-21) pos / content / examples / phrase / sentence fields: TODO

        # 3) translation(s)
        trans = ''.join([item.get_text() for item in soup.select('li[class="clearfix"] span')])
        print("翻译：" + trans)

        # 4) parts of speech
        part_of_speech = ';'.join([item.get_text() for item in soup.select('li[class="clearfix"] span[class=prop]')])
        print("词性：" + part_of_speech)

        # 5) example sentences (youdao Collins section)
        example_sentense = '\n'.join(
            [item.text for item in html.xpath('//*[@id="collinsResult"]/div/div/div/div/ul/li[1]/div[2]/div/p')])
        print("例句：" + example_sentense)

        # Intended return value — still disabled until the remaining fields
        # above are implemented (save_data() expects exactly this shape).
        # data_dict = {
        #     'word': word,
        #     'soundmark': soundmark,
        #     'trans': trans,
        #     'part_of_speech': part_of_speech,
        #     'example_sentense': example_sentense,
        #     'USA_pronunciation': USA_pronunciation,
        #     'UK_pronunciation': UK_pronunciation,
        #     'word_root_or_affix': '',
        #     'synonymy': '',
        #     'antonym': ''
        # }
        # return data_dict

    def save_data(self, data_dict):
        """
        Store one word's data into MySQL.

        :param data_dict: dict with keys word, soundmark, trans,
                          part_of_speech, example_sentense, USA_pronunciation,
                          UK_pronunciation
        :return: None
        :raises pymysql.MySQLError: on connection or query failure
        """
        db = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            passwd='mysql',
            db='test_1',
            use_unicode=True,
            charset="utf8"
        )
        try:
            cursor = db.cursor()
            print("---" * 20)
            # Parameterised query: the driver quotes/escapes every value.
            # (The old %-formatting produced unquoted, injectable SQL.)
            # NOTE(review): column "UK_pronumciation" looks like a typo, but it
            # must match the actual table schema — verify before renaming.
            sql = """INSERT INTO danci_wordslibrary (word, soundmark, trans, part_of_speech, example_sentense,
                     USA_pronunciation, UK_pronumciation, word_root_or_affix, synonymy, antonym)
                     VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"""
            cursor.execute(sql, (
                data_dict['word'], data_dict['soundmark'], data_dict['trans'],
                data_dict['part_of_speech'], data_dict['example_sentense'],
                data_dict['USA_pronunciation'], data_dict['UK_pronunciation'],
                '', '', ''))
            # commit() lives on the connection, not the cursor — the original
            # called cursor.commit(), which raises AttributeError in pymysql.
            db.commit()
        finally:
            # Always release the connection, even when the INSERT fails.
            db.close()

    def start_spider(self):
        """
        Main loop: crawl, parse (and eventually store) every core word.

        :return: None
        """
        try:
            for word in CORE500WORDS:
                # Fetch the raw pages for this word.
                response = self.run_spider(word)
                # Extract the target fields.
                data_dict = self.parse_data(response, word)
                # Storage stays disabled until parse_data() returns a full dict.
                # self.save_data(data_dict)
                self.count += 1
        finally:
            # Always release the Chrome session (the original leaked it).
            self.driver.quit()


if __name__ == '__main__':
    # Build the spider and kick off the crawl over all core words.
    SpiderIciba().start_spider()
