# -*- coding: utf-8 -*-

import random
import requests
import parsel
import pymysql

class entity:
    """Crawl a CSDN blog article-list page and mirror articles/tags into a
    local MySQL ``blog`` database.

    Two-pass workflow:
      1. :meth:`get_all_article` scrapes every article (title, content, tags),
         fills the accumulator attributes, and inserts previously unseen tags
         into ``taginfo`` (adding a matching 0/1 column to ``blogtags``).
      2. :meth:`get_data_1` inserts the articles selected by index into
         ``blogInfo`` and writes one blog-to-tag row per article into
         ``blogtags``.
    """

    # NOTE(review): these are class-level (shared) defaults; init() shadows
    # them with per-instance lists. Call init() — or get_all_article(), which
    # calls it — before relying on them.
    label = []        # tags already present in the database (taginfo.content)
    total_tags = []   # tags collected from the scraped articles (new ones only after dedup)
    blogIds = []      # blogIds of rows newly inserted into blogInfo
    blogtag_id = []   # per-article tag list (one sub-list per scraped blog)
    contents = []     # HTML content of every scraped article
    Titles = []       # title of every scraped article
    links = []        # URL of every scraped article
    myurl = ''        # article-list URL of the last crawl

    # Pool of User-Agent strings; one is chosen at random per crawl so the
    # requests look like different browsers.
    my_headers = [
        "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
        "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
        'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
        'Opera/9.25 (Windows NT 5.1; U; en)',
        'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
        'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
        'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
        'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
        "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
        "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
    ]

    def init(self):
        """Reset every per-crawl accumulator (creates instance attributes
        shadowing the class-level defaults)."""
        self.label = []
        self.total_tags = []
        self.blogIds = []
        self.blogtag_id = []
        self.contents = []
        self.Titles = []
        self.links = []

    def get_head(self):
        """Return a request-header dict with a randomly chosen User-Agent."""
        return {"User-Agent": random.choice(self.my_headers)}

    def get_all_article(self, myurl):
        """Pass 1: scrape every article linked from *myurl* and register new tags.

        Side effects: resets and repopulates the accumulator attributes,
        inserts unseen tags into ``taginfo``, and adds one ``int(1)`` column
        per new tag to ``blogtags``. Returns None.
        """
        db = pymysql.connect(host="localhost", user="root", password="",
                             database="blog", charset="utf8")
        cursor = db.cursor()
        try:
            self.init()
            self.myurl = myurl

            # Load the tags that already exist in the database.
            cursor.execute('select content from taginfo')
            db.commit()
            rows = cursor.fetchall()
            print(rows)
            for row in rows:
                self.label.extend(row)

            # Fetch the article-list page and collect the article links.
            head = self.get_head()
            response = requests.get(url=myurl, headers=head)
            selector = parsel.Selector(response.text)
            article_links = selector.css('.article-list div.article-item-box a::attr(href)').getall()

            for link in article_links:
                link = str(link)
                res_son = requests.get(url=link, headers=head)
                selector_son = parsel.Selector(res_son.text)
                title = selector_son.css('#articleContentId::text').get()      # article title
                content = selector_son.css('#content_views').get()            # article body HTML
                tags = selector_son.css('div.tags-box.artic-tag-box a.tag-link::text').getall()
                sp_href = selector_son.css('div.tags-box.artic-tag-box a.tag-link::attr(href)').getall()

                # Skip overly long articles — presumably a DB column size
                # limit; TODO confirm against the blogInfo schema.
                if len(content) > 6100:
                    continue
                print(title)
                print(len(content))
                self.Titles.append(title)
                self.contents.append(content)
                self.links.append(link)

                # Only links pointing at the CSDN tag search count as tags.
                count_tags = sum(1 for h in sp_href if "https://so.csdn.net" in h)

                # Keep the LAST count_tags entries (walked in reverse, as the
                # real tags come after category links in the page markup).
                blt = list(reversed(tags))[:count_tags]
                self.total_tags.extend(blt)
                self.blogtag_id.append(blt)  # tags of the current article

            # Tags not yet in the database. BUG FIX: the original removed
            # items from the list while iterating it, which skips elements.
            known = set(self.label)
            total_tags = [t for t in set(self.total_tags) if t not in known]
            print('==')
            print(total_tags)
            self.total_tags = total_tags

            # Insert each new tag and add a matching 0/1 column to blogtags.
            for offset, tag in enumerate(total_tags, start=1):
                # Parameterized query — the original concatenated scraped tag
                # text into the SQL string (injection risk).
                cursor.execute('insert into taginfo(content) values(%s)', (tag,))
                db.commit()

                tagid = str(len(self.label) + offset)  # next free tag column id
                print(tagid)
                # DDL identifiers cannot be bound parameters; tagid is derived
                # from an int, so interpolation is safe here.
                cursor.execute('alter table `blogtags` add column `' + tagid + '` int(1) default 0')
                db.commit()
                print("success_2")
        finally:
            cursor.close()
            db.close()

    def get_data_1(self, ids):
        """Pass 2: insert the articles selected by index in *ids* into
        ``blogInfo`` and write their blog-to-tag rows into ``blogtags``.

        *ids* are 0-based positions into the article list previously crawled
        by :meth:`get_all_article`. Returns None.
        """
        myurl = self.myurl
        db = pymysql.connect(host="localhost", user="root", password="",
                             database="blog", charset="utf8")
        cursor = db.cursor()
        try:
            head = self.get_head()
            response = requests.get(url=myurl, headers=head)
            selector = parsel.Selector(response.text)
            article_links = selector.css('.article-list div.article-item-box a::attr(href)').getall()

            # Tag lists of the selected blogs, one sub-list per blog.
            newblogtags = []
            for rr in ids:
                print("ids:")
                print(rr)
                print(self.blogtag_id[rr])
                newblogtags.append(self.blogtag_id[rr])
            print(newblogtags)

            # BUG FIX: the original only advanced the position counter inside
            # the `if k in ids` branch (and `continue` skipped it too), so
            # after the first match the counter froze and the remaining
            # selected articles were never inserted. enumerate() keeps the
            # counter in lockstep with the link index.
            for k, link in enumerate(article_links):
                if k not in ids:
                    continue
                link = str(link)
                print(k)

                res_son = requests.get(url=link, headers=head)
                selector_son = parsel.Selector(res_son.text)
                title = selector_son.css('#articleContentId::text').get()
                content = selector_son.css('#content_views').get()
                time = selector_son.css('div.bar-content span.time::text').get()
                # Normalize "YYYY-MM-DD hh:mm:ss" into a bare digit string.
                time = time.replace('-', '').replace(' ', '').replace(':', '')

                if len(content) > 6100:  # same length cutoff as pass 1
                    continue
                print(title)
                print(len(content))

                # Insert the article, then read back its auto-assigned blogId.
                try:
                    sql = '''insert into blogInfo(userId, createDate, content, star, blogState, blogTitle, blogFromOther)
                                            value(2, %s, %s, 0, 1, %s, %s)'''
                    cursor.execute(sql, [time, content, title, link])
                    db.commit()

                    cursor.execute("select max(blogId) from blogInfo")
                    db.commit()
                    new_id = cursor.fetchall()
                    self.blogIds.append(new_id[0][0])
                    print("success_1")
                except Exception as e:
                    # Best-effort: log and keep inserting the other articles.
                    print(e)

            # Every tag column currently in blogtags, in column order.
            total_tags = self.label + self.total_tags
            insert_tags = 'insert into `blogtags`'  # SQL prefix for the tag rows

            # Map each selected blog's tag names to their 1-based tagids.
            blogtagid = []
            print(newblogtags)
            print(total_tags)
            for tag_list in newblogtags:
                blogtagid.append([total_tags.index(t) + 1 for t in tag_list if t in total_tags])

            print('每个blog的tag')
            print(self.blogtag_id)
            print('每个blog的tagid')
            print(blogtagid)
            print('所有tag')
            print(total_tags)

            # 0/1 matrix: row per inserted blog, column per tagid (index 0 is
            # an unused placeholder because tagids start at 1).
            tmp = [[0] * (len(total_tags) + 1) for _ in range(len(self.blogIds))]
            print(tmp)
            for i, tagids in enumerate(blogtagid):
                for tid in tagids:
                    tmp[i][tid] = 1

            print('对应要加的值')
            print(tmp)
            print(len(tmp))

            for i, blog_id in enumerate(self.blogIds):
                # Row layout: blogId, then one flag per tag column 1..N.
                # BUG FIX: the original emitted indices 0..N-2 plus N,
                # skipping tmp[i][N-1] and shifting every flag one column
                # to the right. All values are ints, so join is safe.
                values = [str(blog_id)] + [str(v) for v in tmp[i][1:]]
                fsql = insert_tags + ' values(' + ','.join(values) + ')'
                print(fsql)
                cursor.execute(fsql)
                db.commit()
                print('success_3')
        finally:
            cursor.close()
            db.close()
