summaryrefslogtreecommitdiff
path: root/href.go
blob: d5b0c0f2005832da8ee5a600f4e26b5b69f72f82 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
package main

import (
	"errors"
	"io"
	"net/http"
	"strings"
	"time"

	"golang.org/x/net/html"
	"golang.org/x/net/html/charset"
)

// Sentinel errors returned by getTitle and title. Callers can compare
// with errors.Is to distinguish why a title could not be extracted.
var (
	errNotHTML = errors.New("not HTML")     // response Content-Type is not text/html
	errNotOK   = errors.New("not OK")       // HTTP status was not 200 OK
	errTooBig  = errors.New("content too big")  // declared Content-Length exceeds maxLength
	errTooDeep = errors.New("content too deep") // recursion bound hit while searching the node tree
	errNoTitle = errors.New("no title")     // no <title> element found
)

// Limits guarding against pathological or hostile documents.
const (
	maxLength = 10 * 1024 * 1024 // 10MB cap on how much of the body is read/parsed
	maxDepth  = 10               // maximum node-tree depth searched for <title>
)

// title performs a depth-first search of the node tree rooted at n for
// the first <title> element and returns its text content with
// surrounding whitespace trimmed. depth bounds the recursion: when it
// is exhausted errTooDeep is returned for that subtree; if no <title>
// element is found anywhere, errNoTitle is returned.
func title(n *html.Node, depth int) (string, error) {
	if depth <= 0 {
		return "", errTooDeep
	}
	if n.Type == html.ElementNode && n.Data == "title" {
		var b strings.Builder
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			// Only text nodes contribute to the title. Without this
			// check, a comment (or other non-text node) inside
			// <title> would leak its raw Data into the result.
			if c.Type == html.TextNode {
				b.WriteString(c.Data)
			}
		}
		return strings.TrimSpace(b.String()), nil
	}
	for c := n.FirstChild; c != nil; c = c.NextSibling {
		// Any error (too deep or no title in this subtree) just moves
		// the search on to the next sibling.
		if t, err := title(c, depth-1); err == nil {
			return t, nil
		}
	}
	return "", errNoTitle
}

func getTitle(uri string) (string, error) {
	resp, err := http.Get(uri)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	ct := resp.Header.Get("Content-Type")
	if !strings.HasPrefix(ct, "text/html") {
		return "", errNotHTML
	}

	if resp.StatusCode != http.StatusOK {
		return "", errNotOK
	}

	if resp.ContentLength > maxLength {
		return "", errTooBig
	}

	r, err := charset.NewReader(io.LimitReader(resp.Body, maxLength), ct)
	if err != nil {
		return "", err
	}

	doc, err := html.Parse(r)
	if err != nil {
		return "", err
	}

	return title(doc, maxDepth)
}

// getLinks scans the whitespace-separated tokens of s and collects the
// ones that look like links: tokens starting with "http:" or "https:"
// are kept as-is, and tokens starting with "www." are kept with an
// "http://" scheme prepended. Order of appearance is preserved.
func getLinks(s string) (ret []string) {
	for _, tok := range strings.Fields(s) {
		if strings.HasPrefix(tok, "www.") {
			ret = append(ret, "http://"+tok)
			continue
		}
		if strings.HasPrefix(tok, "http:") || strings.HasPrefix(tok, "https:") {
			ret = append(ret, tok)
		}
	}
	return
}