// A simple concurrent web crawler: fetches a fixed list of pages in
// parallel goroutines and reports the response body size of each.
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// getUrls builds the list of candidate URLs to crawl: pages 1..39
// of the target site.
func getUrls() []string {
	// Pre-size the slice to its known final length to avoid
	// repeated growth/copy during append.
	urls := make([]string, 0, 39)
	for i := 1; i <= 39; i++ {
		urls = append(urls, fmt.Sprintf("http://www.crazyant.net/page/%d", i))
	}
	return urls
}

// Page is the crawl result for a single web page: the URL that was
// fetched and the size of its response body.
type Page struct {
	Url  string // address of the fetched page
	Size int    // response body length in bytes
}

// main launches one crawler goroutine per candidate URL, then
// collects exactly one result per URL from the shared channel.
func main() {
	urls := getUrls()
	fmt.Println(urls)

	results := make(chan Page)
	for _, u := range urls {
		fmt.Println("craw url: ", u)
		go scrapyWebUrl(u, results)
	}

	// Receive and print one result for every URL launched above;
	// this also keeps main alive until all workers have reported.
	for range urls {
		p := <-results
		fmt.Println("craw result: ", p.Url, p.Size)
	}
}

// scrapyWebUrl fetches url, measures the response body size, and
// sends the result on mychanel.
//
// On failure it logs the error and still sends a Page (with Size 0)
// so the receiver in main always gets one message per URL. The
// original used log.Fatal here, which is a bug in a worker
// goroutine: a single bad URL would abort the whole program and
// skip the deferred Body.Close of every in-flight request.
func scrapyWebUrl(url string, mychanel chan Page) {
	page := Page{Url: url}

	response, err := http.Get(url)
	if err != nil {
		log.Printf("fetching %s: %v", url, err)
		mychanel <- page
		return
	}
	defer response.Body.Close()

	body, err := ioutil.ReadAll(response.Body)
	if err != nil {
		log.Printf("reading %s: %v", url, err)
		mychanel <- page
		return
	}

	page.Size = len(body)
	mychanel <- page
}
