package main

import (
	"GitHub/tnan/requests"
	"errors"
	"fmt"
	htmlquery "github.com/antchfx/xquery/html"
	"github.com/garyburd/redigo/redis"
	"github.com/jinzhu/gorm"
	"strconv"
	"strings"
)

// GetBlogUrl crawls the paginated CSDN article-list pages, appending the page
// number (starting at 1) to url, and returns a map of blog URL -> preview text.
// Pagination stops when a page yields fewer than 40 articles (CSDN's page size).
func GetBlogUrl(url string) map[string]string {
	list := map[string]string{}

	for num := 1; ; num++ {
		content := requests.Get(url+strconv.Itoa(num), nil) // fetch one listing page

		doc, err := htmlquery.Parse(strings.NewReader(content))
		if err != nil {
			// Unparseable page: stop paginating instead of silently continuing
			// with a nil document (the original ignored this error).
			fmt.Println("parse error:", err)
			break
		}
		li := htmlquery.Find(doc, "//div[@id='mainBox']//div[@class='article-list']/div[@class='article-item-box csdn-tracking-statistics']")
		fmt.Println("len  aabbccdd ", len(li))

		// Extract the blog URL and its displayed preview text from each item.
		for _, item := range li {
			links := htmlquery.Find(item, "//p/a[last()]")
			show := htmlquery.Find(item, "//p[@class='content']/a/text()")
			if len(links) == 0 || len(show) == 0 {
				continue // malformed entry; skip rather than panic on [0]
			}
			list[htmlquery.SelectAttr(links[0], "href")] = show[0].Data
		}
		// Fewer than 40 entries means this was the last page.
		if len(li) < 40 {
			break
		}
	}
	return list
}

// DataExtract parses one blog page and returns (publish time, blog type,
// article HTML, title, error). A non-nil error means the HTML could not be
// parsed or a required element was missing from the page.
func DataExtract(body string) (string, string, string, string, error) {
	doc, err := htmlquery.Parse(strings.NewReader(body))
	if err != nil {
		// The original discarded this error and would have panicked downstream.
		return "", "", "", "", err
	}

	// Locate the article body.
	li := htmlquery.Find(doc, "//div[@class='blog-content-box']/article[@class='baidu_pl']")
	if len(li) == 0 {
		return "", "", "", "", errors.New("下标越界a")
	}
	// Re-wrap the extracted body in its original tag so the stored HTML is complete.
	content := "<article class='baidu_pl'>" + htmlquery.OutputHTML(li[0], false) + "</article>"

	// Publish time and blog category/tag text nodes.
	times := htmlquery.Find(doc, "//div[@class='blog-content-box']//div[@class='bar-content']/span[@class='time']/text()")
	blogtype := htmlquery.Find(doc, "//div[@class='blog-content-box']//div[@class='blog-tags-box']//a[@class='tag-link']/text()")
	if len(times) == 0 || len(blogtype) == 0 {
		return "", "", "", "", errors.New("下标越界b")
	}
	// Strip all spaces from the tag text (ReplaceAll instead of a magic count).
	blogtypes := strings.ReplaceAll(blogtype[0].Data, " ", "")

	// Blog title.
	title := htmlquery.Find(doc, "//div[@class='article-title-box']/h1/text()")
	if len(title) == 0 {
		return "", "", "", "", errors.New("下标越界c")
	}

	return times[0].Data, blogtypes, content, title[0].Data, nil
}

// AddDatabase maps the blog's tag name to a numeric type id and inserts one
// Blog row into the database. Unrecognized tags fall back to type id 6.
func AddDatabase(showcontent, times, types, content, title string, db *gorm.DB) {
	// Tag name -> blog type id in the database.
	dict := map[string]int{
		"gorm":       2,
		"C++":        3,
		"数据结构与算法":    5,
		"GO":         2,
		"web":        6,
		"sql":        4,
		"MySQL":      4,
		"Docker":     6,
		"python基础高级": 1,
	}

	// Strip all newlines from the tag text before the lookup
	// (ReplaceAll instead of a magic replacement count).
	typeID, ok := dict[strings.ReplaceAll(types, "\n", "")]
	if !ok {
		typeID = 6 // default category for unknown tags
	}

	// Build and persist the row.
	blog := Blog{
		BlogTypeID:  typeID,
		Content:     content,
		CreatedAt:   GetTimeStamp(times),
		Title:       title,
		ShowContent: showcontent,
	}
	db.Create(&blog)
}
// GetBlogBody fetches each blog URL in lists (URL -> preview text), extracts
// its data with DataExtract, and stores it in MySQL via AddDatabase. Redis is
// used as a seen-set so already-crawled URLs are skipped.
func GetBlogBody(lists map[string]string, redis_conl redis.Conn, db *gorm.DB) {
	// Bound the per-URL retries: the original `for {}` looped forever on a
	// page that permanently fails extraction.
	const maxRetries = 5

	for url, show := range lists {
		// If the URL already exists in redis it was crawled before; skip it.
		if r, _ := redis.String(redis_conl.Do("Get", url)); len(r) != 0 {
			continue
		}

		for attempt := 0; attempt < maxRetries; attempt++ {
			body := requests.Get(url, nil)
			times, types, content, title, err := DataExtract(body) // extract page data
			fmt.Println("aabbccdd", err)
			if err == nil {
				AddDatabase(show, times, types, content, title, db) // persist to MySQL
				break
			}
		}

		// Record the URL in redis for dedup; best-effort, but surface failures
		// instead of discarding them silently.
		if _, err := redis_conl.Do("set", url, "yes"); err != nil {
			fmt.Println("redis set error:", err)
		}
	}
}
// main wires up redis (database 11) and MySQL, collects every blog URL from
// the CSDN article list, and crawls each one into the database.
func main() {
	// Connect to redis database 11 and fail fast: the original checked this
	// error only AFTER the full GetBlogUrl crawl had already run.
	redisClient, err := redis.Dial("tcp", "127.0.0.1:6379", redis.DialDatabase(11))
	if err != nil {
		fmt.Println("redis数据连接错误")
		return
	}
	defer redisClient.Close()

	db := ConnectMysql() // connect to the MySQL database
	defer db.Close()

	// Gather all blog URLs (and their preview text), then crawl each page.
	mapDict := GetBlogUrl("https://blog.csdn.net/qq_42031243/article/list/")
	GetBlogBody(mapDict, redisClient, db)
}
