1 | n/a | """ robotparser.py |
---|
2 | n/a | |
---|
3 | n/a | Copyright (C) 2000 Bastian Kleineidam |
---|
4 | n/a | |
---|
5 | n/a | You can choose between two licenses when using this package: |
---|
6 | n/a | 1) GNU GPLv2 |
---|
7 | n/a | 2) PSF license for Python 2.2 |
---|
8 | n/a | |
---|
9 | n/a | The robots.txt Exclusion Protocol is implemented as specified in |
---|
10 | n/a | http://www.robotstxt.org/norobots-rfc.txt |
---|
11 | n/a | """ |
---|

import collections
import urllib.error
import urllib.parse
import urllib.request

__all__ = ["RobotFileParser"]


class RobotFileParser:
    """This class provides a set of methods to read, parse and answer
    questions about a single robots.txt file.
    """

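    # A minimal usage sketch (the URL and the "ExampleBot" agent name are
    # illustrative assumptions, not part of this module):
    #
    #   rp = RobotFileParser("http://www.example.com/robots.txt")
    #   rp.read()
    #   rp.can_fetch("ExampleBot", "http://www.example.com/some/page.html")
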
    def __init__(self, url=''):
        self.entries = []
        self.default_entry = None
        self.disallow_all = False
        self.allow_all = False
        self.set_url(url)
        self.last_checked = 0

    def mtime(self):
        """Returns the time the robots.txt file was last fetched.

        This is useful for long-running web spiders that need to
        check for new robots.txt files periodically.
        """
        return self.last_checked

    def modified(self):
        """Sets the time the robots.txt file was last fetched to the
        current time.
        """
        import time
        self.last_checked = time.time()

    def set_url(self, url):
        """Sets the URL referring to a robots.txt file."""
        self.url = url
        self.host, self.path = urllib.parse.urlparse(url)[1:3]

    def read(self):
        """Reads the robots.txt URL and feeds it to the parser."""
        try:
            f = urllib.request.urlopen(self.url)
        except urllib.error.HTTPError as err:
            if err.code in (401, 403):
                # access to robots.txt itself is restricted:
                # assume everything is disallowed
                self.disallow_all = True
            elif 400 <= err.code < 500:
                # any other client error (e.g. 404) means there is
                # no robots.txt, so everything is allowed
                self.allow_all = True
        else:
            raw = f.read()
            self.parse(raw.decode("utf-8").splitlines())

    def _add_entry(self, entry):
        if "*" in entry.useragents:
            # the default entry is considered last
            if self.default_entry is None:
                # the first default entry wins
                self.default_entry = entry
        else:
            self.entries.append(entry)

    def parse(self, lines):
        """Parse the input lines from a robots.txt file.

        A user-agent: line need not be preceded by one or more
        blank lines.
        """
        # states:
        #   0: start state
        #   1: saw user-agent line
        #   2: saw an allow or disallow line
        state = 0
        entry = Entry()

        self.modified()
        for line in lines:
            if not line:
                # a blank line terminates the current entry
                if state == 1:
                    # user-agent lines without rules are discarded
                    entry = Entry()
                    state = 0
                elif state == 2:
                    self._add_entry(entry)
                    entry = Entry()
                    state = 0
            # remove optional comment and strip line
            i = line.find('#')
            if i >= 0:
                line = line[:i]
            line = line.strip()
            if not line:
                continue
            line = line.split(':', 1)
            if len(line) == 2:
                line[0] = line[0].strip().lower()
                line[1] = urllib.parse.unquote(line[1].strip())
                if line[0] == "user-agent":
                    if state == 2:
                        self._add_entry(entry)
                        entry = Entry()
                    entry.useragents.append(line[1])
                    state = 1
                elif line[0] == "disallow":
                    if state != 0:
                        entry.rulelines.append(RuleLine(line[1], False))
                        state = 2
                elif line[0] == "allow":
                    if state != 0:
                        entry.rulelines.append(RuleLine(line[1], True))
                        state = 2
                elif line[0] == "crawl-delay":
                    if state != 0:
                        # verify the value is numeric before converting;
                        # otherwise int() would raise ValueError on a
                        # malformed robots.txt
                        if line[1].strip().isdigit():
                            entry.delay = int(line[1])
                        state = 2
                elif line[0] == "request-rate":
                    if state != 0:
                        numbers = line[1].split('/')
                        # check if all values are sane
                        if (len(numbers) == 2 and numbers[0].strip().isdigit()
                                and numbers[1].strip().isdigit()):
                            req_rate = collections.namedtuple(
                                'req_rate', 'requests seconds')
                            # instantiate the namedtuple rather than storing
                            # the class itself, so each entry carries its own
                            # requests/seconds values
                            entry.req_rate = req_rate(int(numbers[0]),
                                                      int(numbers[1]))
                        state = 2
        if state == 2:
            self._add_entry(entry)

    def can_fetch(self, useragent, url):
        """Using the parsed robots.txt, decide if useragent can fetch url."""
        if self.disallow_all:
            return False
        if self.allow_all:
            return True
        # Until the robots.txt file has been read or found not
        # to exist, we must assume that no url is allowable.
        # This prevents false positives when a user erroneously
        # calls can_fetch() before calling read().
        if not self.last_checked:
            return False
        # search for given user agent matches
        # the first match counts
        parsed_url = urllib.parse.urlparse(urllib.parse.unquote(url))
        url = urllib.parse.urlunparse(('', '', parsed_url.path,
                                       parsed_url.params, parsed_url.query,
                                       parsed_url.fragment))
        url = urllib.parse.quote(url)
        if not url:
            url = "/"
        for entry in self.entries:
            if entry.applies_to(useragent):
                return entry.allowance(url)
        # try the default entry last
        if self.default_entry:
            return self.default_entry.allowance(url)
        # agent not found ==> access granted
        return True

    def crawl_delay(self, useragent):
        if not self.mtime():
            return None
        for entry in self.entries:
            if entry.applies_to(useragent):
                return entry.delay
        # fall back to the default entry; return None if there is none,
        # rather than raising AttributeError
        if self.default_entry:
            return self.default_entry.delay
        return None

    def request_rate(self, useragent):
        if not self.mtime():
            return None
        for entry in self.entries:
            if entry.applies_to(useragent):
                return entry.req_rate
        # fall back to the default entry; return None if there is none,
        # rather than raising AttributeError
        if self.default_entry:
            return self.default_entry.req_rate
        return None

    def __str__(self):
        return ''.join([str(entry) + "\n" for entry in self.entries])


class RuleLine:
    """A rule line is a single "Allow:" (allowance==True) or "Disallow:"
    (allowance==False) followed by a path."""
    def __init__(self, path, allowance):
        if path == '' and not allowance:
            # an empty value means allow all
            allowance = True
        path = urllib.parse.urlunparse(urllib.parse.urlparse(path))
        self.path = urllib.parse.quote(path)
        self.allowance = allowance

    def applies_to(self, filename):
        return self.path == "*" or filename.startswith(self.path)

    def __str__(self):
        return ("Allow" if self.allowance else "Disallow") + ": " + self.path


class Entry:
    """An entry has one or more user-agents and zero or more rulelines"""
    def __init__(self):
        self.useragents = []
        self.rulelines = []
        self.delay = None
        self.req_rate = None

    def __str__(self):
        ret = []
        for agent in self.useragents:
            ret.extend(["User-agent: ", agent, "\n"])
        for line in self.rulelines:
            ret.extend([str(line), "\n"])
        return ''.join(ret)

    def applies_to(self, useragent):
        """check if this entry applies to the specified agent"""
        # split the name token and make it lower case
        useragent = useragent.split("/")[0].lower()
        for agent in self.useragents:
            if agent == '*':
                # we have the catch-all agent
                return True
            agent = agent.lower()
            if agent in useragent:
                return True
        return False

    def allowance(self, filename):
        """Preconditions:
        - our agent applies to this entry
        - filename is URL-quoted, matching how rule paths are stored"""
        for line in self.rulelines:
            if line.applies_to(filename):
                return line.allowance
        return True
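

if __name__ == '__main__':
    # Demonstration sketch (not part of the original module API): the
    # robots.txt lines and the "ExampleBot" agent name below are made-up
    # examples used only to exercise the parser offline, without fetching
    # anything over the network.
    parser = RobotFileParser()
    parser.parse([
        "User-agent: ExampleBot",
        "Disallow: /private/",
        "Crawl-delay: 2",
        "Request-rate: 3/5",
        "",
        "User-agent: *",
        "Disallow: /",
    ])
    print(parser.can_fetch("ExampleBot", "/public/page.html"))   # True
    print(parser.can_fetch("ExampleBot", "/private/page.html"))  # False
    print(parser.can_fetch("OtherBot", "/public/page.html"))     # False
    print(parser.crawl_delay("ExampleBot"))                      # 2
    print(parser.request_rate("ExampleBot"))  # req_rate(requests=3, seconds=5)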