Commit: Fix the header section configuration format.
[erp5.git] erp5/util/webchecker/__init__.py
1 # -*- coding: utf-8 -*-
2 ##############################################################################
3 #
4 # Copyright (c) 2010 Nexedi SA and Contributors. All Rights Reserved.
5 # Nicolas Delaby <nicolas@nexedi.com>
6 #
7 # WARNING: This program as such is intended to be used by professional
8 # programmers who take the whole responsability of assessing all potential
9 # consequences resulting from its eventual inadequacies and bugs
10 # End users who are looking for a ready-to-use solution with commercial
11 # garantees and support are strongly adviced to contract a Free Software
12 # Service Company
13 #
14 # This program is Free Software; you can redistribute it and/or
15 # modify it under the terms of the GNU General Public License
16 # as published by the Free Software Foundation; either version 2
17 # of the License, or (at your option) any later version.
18 #
19 # This program is distributed in the hope that it will be useful,
20 # but WITHOUT ANY WARRANTY; without even the implied warranty of
21 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 # GNU General Public License for more details.
23 #
24 # You should have received a copy of the GNU General Public License
25 # along with this program; if not, write to the Free Software
26 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
27 #
28 ##############################################################################
29 import os
30 import shutil
31 import sys
32 import re
33 import shlex
34 from subprocess import Popen, PIPE, STDOUT
35 import logging
36 import tempfile
37 from datetime import datetime
38 import threading
39 import signal
40
# Module-private sentinel default meaning "argument not supplied".
# Compared with the `is` operator; its list content is irrelevant.
_MARKER = []
42
class ProcessTimeoutException(Exception):
  """Raised when a watched subprocess exceeds its allotted run time."""
45
def alarm_handler(signum, frame):
  """Signal handler: abort the current operation with a timeout error.

  Intended for use with signal.signal(signal.SIGALRM, alarm_handler);
  the (signum, frame) arguments imposed by the signal API are ignored.
  """
  raise ProcessTimeoutException
48
class HTTPCacheCheckerTestSuite(object):
  """Check the HTTP caching configuration of a website.

  The suite crawls root_url twice with wget (saving response headers
  into the downloaded files) while a varnishlog process records cache
  traffic:

  - first pass: every URL is fetched and validated; URLs answered from
    the cache (numeric X-Cache hit counter) are remembered;
  - second pass: remembered URLs are skipped, the others re-checked,
    and the varnishlog output is used to verify whether the backend
    was (or was not) contacted for each request.

  All problems are accumulated in ``self.report_dict`` (url -> list of
  messages) and optionally emailed as a report by :meth:`start`.
  """

  # Codes returned by _parseWgetLine to classify one line of wget output.
  URL_CODE = 'url'          # line contains the requested URL
  STATUS_CODE = 'status'    # line contains the HTTP status code
  FILE_PATH_CODE = 'path'   # line contains the saved file path
  OTHER_CODE = 'other'      # anything else (discarded)

  LOG_LEVEL_DICT = {'debug': logging.DEBUG,
                    'info': logging.INFO,
                    'warning': logging.WARNING,
                    'error': logging.ERROR,
                    'critical': logging.CRITICAL}

  # Sentinel default meaning "argument not supplied" for _parseWgetLogs.
  # Defined on the class so the method and its default argument always
  # refer to the same object, and so that the shared module-level marker
  # is never mutated by a crawl.
  _MARKER = []

  # wget: "--2010-03-05 12:00:00--  http://example.com/" -> URL
  url_search_in_wget_regex = re.compile(r'^--\d{4}.*--\s+(?P<%s>.*)$' %
                                        URL_CODE)
  # wget: "HTTP request sent, awaiting response... 200 OK" -> status
  status_search_in_wget_regex = re.compile(r'^HTTP request sent, awaiting '
                                           r'response\.\.\. (?P<%s>\d+).+$' %
                                           STATUS_CODE)
  # wget: "Saving to: `index.html'" -> saved file path
  # NOTE(review): this matches the quoting style of older wget releases;
  # newer wget quotes the filename differently -- confirm against the
  # deployed wget version.
  file_save_search_regex = re.compile(r"^Saving to: `(?P<%s>.*)'" %
                                      FILE_PATH_CODE)

  x_cache_header_search_regex = re.compile(r'X-Cache:\s(\S+)\s?$',
                                           re.MULTILINE)
  x_varnish_header_search_regex = re.compile(r'X-Varnish:\s(\d+)',
                                             re.MULTILINE)
  # Template filled with a header name, then compiled in _validateHeader.
  generic_header_search_regex = r'%s:\s(.*)\s$'

  # HTTP statuses that are not reported as errors.
  ACCEPTABLE_STATUS_LIST = ('200', '304', '302',)

  def __init__(self, root_url, working_directory, varnishlog_binary_path,
               wget_binary_path, header_list, email_address, smtp_host,
               debug_level, file_log_path, conditional_header_dict,
               no_header_dict):
    """
    root_url : website to check
    working_directory : where fetched data will be downloaded
    varnishlog_binary_path : path to varnishlog
    wget_binary_path : path to wget command
    header_list : dict, key == header name
                  value: True means the header must be present in the
                  RESPONSE; a tuple/list means the header value must
                  satisfy at least one of the proposed values
    email_address : email address to send the result to (also used as
                    sender address)
    smtp_host : smtp host to use
    debug_level : log level of this utility (debug => very verbose,
                  info => normal,
                  warning => nothing)
    file_log_path : path to log file
    conditional_header_dict : key == section name (like "header url=.*/login")
                              value == list of (header, configuration)
                              pairs for this section (configuration
                              format is the same as header_list)
    no_header_dict : key == section name (like "no_header url=.*/sitemap")
                     value == list of (header, _) pairs naming headers
                     that must NOT be checked for matching responses
    """
    self.root_url = root_url
    self.working_directory = working_directory
    self.varnishlog_binary_path = varnishlog_binary_path
    self.wget_binary_path = wget_binary_path
    self.header_list = header_list
    self.conditional_header_dict = conditional_header_dict
    self.no_header_dict = no_header_dict
    self.email_address = email_address
    self.smtp_host = smtp_host
    level = self.LOG_LEVEL_DICT.get(debug_level, logging.INFO)
    logging.basicConfig(filename=file_log_path, level=level)
    # url -> list of error messages, filled during the crawl
    self.report_dict = {}
    # seconds to wait for varnishlog output before killing the process
    self._timeout = 30

  def _initFolder(self):
    """Create the working directory, deleting any previous content."""
    if os.path.isdir(self.working_directory):
      logging.debug('Re-creating folder:%r' % self.working_directory)
      shutil.rmtree(self.working_directory)
    else:
      logging.debug('Creating folder:%r' % self.working_directory)
    os.mkdir(self.working_directory)

  def _runVarnishLog(self):
    """Start a varnishlog process reading the shared memory log (-d)
    and return the Popen object.  Its output is collected later, with
    a timeout, by _readVarnishLog."""
    logging.info('Start varnishlog process')
    command_string = '%s -d' % self.varnishlog_binary_path
    command_list = shlex.split(command_string)
    return Popen(command_list, stdin=PIPE, stdout=PIPE)

  def _readVarnishLog(self, process):
    """Return varnishlog's stdout as a string.

    varnishlog never terminates by itself, so a timer kills it after
    self._timeout seconds and communicate() then returns what was read.
    """
    logging.info('Reading varnishlog with timeout:%r' % self._timeout)
    def _kill_process(pid):
      # support for python 2.5 and under.
      # Shall use process.terminate() for python2.6 and above
      os.kill(pid, signal.SIGTERM)
    watcher = threading.Timer(self._timeout, _kill_process,
                              args=(process.pid,))
    watcher.start()
    varnishlog_data, _ = process.communicate()
    watcher.cancel()
    return varnishlog_data

  def _readVarnishLogAndGetIsBackendTouched(self, varnishlog_data,
                                            x_varnish_reference_list):
    """Verify backend usage against the varnishlog output.

    varnishlog contains lines such as::

      15 TxHeader     b X-Varnish: 385643239

    A "TxHeader b X-Varnish: <reference>" line means varnish forwarded
    the request carrying that reference to the backend.

    Each element of x_varnish_reference_list is a triplet
    (x_varnish_reference, backend_must_be_touched, url); whenever the
    observed behaviour differs from backend_must_be_touched, an error
    message is appended to self.report_dict[url].
    """
    for x_varnish_reference in x_varnish_reference_list:
      backend_is_touched_regex = re.compile(
        r'TxHeader\s+b\sX-Varnish:\s%s' % x_varnish_reference[0])
      match_object = backend_is_touched_regex.search(varnishlog_data)
      backend_touched = match_object is not None
      logging.debug('%r %r' % (backend_touched, x_varnish_reference,))
      if backend_touched != x_varnish_reference[1]:
        if backend_touched:
          title = 'Error:Backend touched'
        else:
          title = 'Error Backend not touched'
        message = '%s -> X-Varnish:%r' % (title, x_varnish_reference,)
        self.report_dict.setdefault(x_varnish_reference[2],
                                    []).append(message)

  def _parseWgetLine(self, line):
    """Classify one line of wget output.

    Returns a (code, value) tuple:
      URL_CODE       -> value is the requested url
      STATUS_CODE    -> value is the HTTP status code string
      FILE_PATH_CODE -> value is the path of the saved file
      OTHER_CODE     -> value is the raw line (to be discarded)
    """
    match_object = self.url_search_in_wget_regex.search(line)
    if match_object is not None:
      return self.URL_CODE, match_object.group(self.URL_CODE)
    match_object = self.status_search_in_wget_regex.search(line)
    if match_object is not None:
      return self.STATUS_CODE, match_object.group(self.STATUS_CODE)
    match_object = self.file_save_search_regex.search(line)
    if match_object is not None:
      return self.FILE_PATH_CODE, match_object.group(self.FILE_PATH_CODE)
    return self.OTHER_CODE, line

  def _runSpider(self):
    """Crawl root_url recursively with wget inside working_directory,
    keeping the response headers in the saved files, and return wget's
    combined stdout/stderr output."""
    wget_command_string = '%s -r -nc --retry-connrefused --save-headers %s ' \
        % (self.wget_binary_path, self.root_url)
    logging.debug('wget command:%r' % wget_command_string)
    wget_argument_list = shlex.split(wget_command_string)
    wget_process = Popen(wget_argument_list, stdin=PIPE,
                         stdout=PIPE, stderr=STDOUT,
                         env={'LANG': 'en_EN'},  # force output messages
                                                 # in English so the
                                                 # parsing regexes match
                         universal_newlines=True,
                         cwd=self.working_directory)
    stdout, _ = wget_process.communicate()
    return stdout

  def _getHeaderPolicyList(self, url, fetched_data):
    """Return the list of (header, reference_value) policies that apply
    to this url / response.

    Example configuration::

      [header_list]
      Last-Modified = True
      Vary = Accept-Language, Cookie, Accept-Encoding
      Cache-Control = max-age=300
                      max-age=3600
      [header url=.*/contact_form]
      Last-Modified = True

      [no_header content-type=(image/.*|.*/javascript)]
      Vary = False

      [erp5_extension_list]
      prohibited_folder_name_list = web_page_module
                                    document_module
      prohibited_file_name_list = WebSection_viewAsWeb
                                  Base_viewHistory
                                  list

    Conditional "header url=..." / "header content-type=..." sections
    override the global [header_list] policies when they match; headers
    listed by a matching "no_header ..." section are never checked.
    """
    def getNoCheckHeaderList(url, fetched_data):
      """Return the header names excluded from checking for this
      response (collected from matching "no_header ..." sections)."""
      pick_content_type = re.compile(r'^no_header\s*content-type=(.*)')
      pick_url = re.compile(r'^no_header\s*url=(.*)')
      no_check_header_list = []
      for section in self.no_header_dict.keys():
        content_type_regex_str_match = pick_content_type.match(section)
        url_regex_str_match = pick_url.match(section)
        if content_type_regex_str_match is not None:
          content_type_regex_str = content_type_regex_str_match.group(1)
          content_type_regex = \
            re.compile(r'Content-Type:\s%s' % content_type_regex_str,
                       re.MULTILINE | re.IGNORECASE)
          if content_type_regex.search(fetched_data) is not None:
            for header, _ in self.no_header_dict[section]:
              no_check_header_list.append(header)
          continue
        if url_regex_str_match is not None:
          url_regex_str = url_regex_str_match.group(1)
          if re.compile(url_regex_str).match(url) is not None:
            for header, _ in self.no_header_dict[section]:
              no_check_header_list.append(header)
      return no_check_header_list

    def getConditionalHeaderDict(url, fetched_data):
      """Return {header: policy} from "header ..." sections matching
      this response's Content-Type or this url."""
      conditional_header_dict = {}
      pick_content_type = re.compile(r'header\s*content-type=(.*)')
      pick_url = re.compile(r'header\s*url=(.*)')
      for section in self.conditional_header_dict.keys():
        content_type_regex_str_match = pick_content_type.match(section)
        url_regex_str_match = pick_url.match(section)
        if content_type_regex_str_match is not None:
          content_type_regex_str = content_type_regex_str_match.group(1)
          content_type_regex = \
            re.compile(r'Content-Type:\s%s' % content_type_regex_str,
                       re.MULTILINE | re.IGNORECASE)
          if content_type_regex.search(fetched_data) is not None:
            for header, value in self.conditional_header_dict[section]:
              conditional_header_dict[header] = _formatConfiguration(value)
          continue
        if url_regex_str_match is not None:
          url_regex_str = url_regex_str_match.group(1)
          if re.compile(url_regex_str).match(url) is not None:
            for header, value in self.conditional_header_dict[section]:
              conditional_header_dict[header] = _formatConfiguration(value)
      return conditional_header_dict

    no_header_list = getNoCheckHeaderList(url, fetched_data)
    conditional_header_dict = getConditionalHeaderDict(url, fetched_data)
    header_policy_list = []
    if conditional_header_dict:
      # a matching conditional section fully replaces the global policy
      check_header_set = (set(conditional_header_dict.keys())
                          - set(no_header_list))
      for header in check_header_set:
        header_policy_list.append((header, conditional_header_dict[header]))
    else:
      check_header_set = (set(self.header_list.keys())
                          - set(no_header_list))
      for header in check_header_set:
        header_policy_list.append((header, self.header_list[header]))
    return header_policy_list

  def _validateHeader(self, url, header, reference_value, fetched_data):
    """Check one header of the saved response against its policy.

    reference_value True  -> the header must exist with a value;
    reference_value tuple/list -> the value must be one of its items.
    Failures are appended to self.report_dict[url].
    """
    re_compiled = re.compile(self.generic_header_search_regex % header,
                             re.MULTILINE | re.IGNORECASE)
    match_object = re_compiled.search(fetched_data)
    if match_object is None:
      message = 'header:%r not found' % (header)
      self.report_dict.setdefault(url, []).append(message)
    else:
      read_value = match_object.group(1)
      if reference_value is True and not read_value:
        message = 'value of header:%r not found' % (header)
        self.report_dict.setdefault(url, []).append(message)
      elif isinstance(reference_value, (tuple, list)):
        if read_value not in reference_value:
          message = 'value of header:%r does not match' \
                    ' (%r not in %r)' % \
                    (header, read_value, reference_value)
          self.report_dict.setdefault(url, []).append(message)

  def _isSameUrl(self, url):
    """Return whether the same url, modulo a trailing slash, has
    already been checked.

    Example case:
      http://example.com/login_form
      http://example.com/login_form/
    """
    if url in (None, ''):
      return False
    if url.endswith('/'):
      same_url = url.rstrip('/')
    else:
      same_url = '%s/' % url
    return same_url in self.report_dict

  def _readFetchedData(self, url, file_path):
    """Return the content wget saved for url, or None when no file is
    found.  wget may save a folder page as <path>/index.html and a
    duplicate download as <path>.1, so those locations are tried too.
    """
    index_file_path = file_path + os.path.sep + 'index.html'
    number_file_path = file_path + '.1'
    for candidate_path in (file_path, index_file_path, number_file_path):
      try:
        file_object = open(candidate_path, 'r')
      except IOError:
        continue
      try:
        return file_object.read()
      finally:
        file_object.close()
    logging.info('File not found for url:%r %r' %
                 (url, (file_path, index_file_path, number_file_path),))
    return None

  def _parseWgetLogs(self, wget_log_file, discarded_url_list=_MARKER,
                     prohibited_file_name_list=None,
                     prohibited_folder_name_list=None):
    """Parse wget's output and check the caching policy of every page.

    wget_log_file : wget output as a single string
    discarded_url_list : urls to skip entirely (second pass); leave the
        default sentinel to mark the first pass
    prohibited_file_name_list / prohibited_folder_name_list : names
        that must not appear as saved filename / path component

    Returns (x_varnish_reference_list, discarded_url_list) where
    x_varnish_reference_list holds triplets
    (x_varnish_reference, backend_must_be_touched, url) checked later
    against the varnishlog output, and discarded_url_list the urls
    answered from the cache (not to be re-checked in the second pass).
    """
    first_pass = discarded_url_list is self._MARKER
    if first_pass:
      # Never mutate the shared sentinel: the original code appended
      # crawled urls to it, leaking state across calls and instances.
      discarded_url_list = []
    x_varnish_reference_list = []
    url = None          # url currently described by the wget output
    discarded = False   # whether lines about `url` must be skipped
    for line in wget_log_file.splitlines():
      logging.debug('wget output:%r' % line)
      code, value = self._parseWgetLine(line)
      if code == self.URL_CODE:
        # This is the first line wget prints for each checked URL
        url = value
        logging.debug('url:%r' % url)
        discarded = False
        if not first_pass and url in discarded_url_list:
          # URL already checked during first pass
          logging.debug('%r Discarded' % url)
          discarded = True
        elif self._isSameUrl(url):
          # same page already seen with/without a trailing slash
          discarded = True
        continue
      if discarded or url is None:
        # keep reading wget output without doing anything
        continue
      if code == self.STATUS_CODE:
        if value not in self.ACCEPTABLE_STATUS_LIST:
          message = 'Page in error status:%r' % (value)
          self.report_dict.setdefault(url, []).append(message)
      if code == self.FILE_PATH_CODE:
        # The response (with its headers) was saved to disk: check it
        file_path = os.path.sep.join((self.working_directory, value))
        folder_path, filename = os.path.split(file_path)
        if prohibited_file_name_list:
          if '?' in filename:
            # drop the query string wget kept in the filename
            filename = filename.rpartition('?')[0]
          if filename in prohibited_file_name_list:
            message = '%r is prohibited as filename' % filename
            self.report_dict.setdefault(url, []).append(message)
        if prohibited_folder_name_list:
          for prohibited_folder_name in prohibited_folder_name_list:
            if prohibited_folder_name in folder_path.split(os.path.sep):
              message = '%r is prohibited as folder name' \
                  % prohibited_folder_name
              self.report_dict.setdefault(url, []).append(message)
              break
        fetched_data = self._readFetchedData(url, file_path)
        if fetched_data is None:
          continue
        x_cache_header_match_object = \
          self.x_cache_header_search_regex.search(fetched_data)
        if x_cache_header_match_object is None:
          # This RESPONSE is not cached by Varnish
          message = 'X-Cache header not found'
          self.report_dict.setdefault(url, []).append(message)
        else:
          # X-Cache is present: read X-Varnish to know whether the
          # backend has been touched
          x_varnish_match_object = \
            self.x_varnish_header_search_regex.search(fetched_data)
          if x_varnish_match_object is None:
            # previously crashed with AttributeError on group(1)
            message = 'X-Varnish header not found'
            self.report_dict.setdefault(url, []).append(message)
          else:
            x_varnish_reference = x_varnish_match_object.group(1)
            logging.debug('x_varnish_reference:%r for url:%r' %
                          (x_varnish_reference, url))
            hits = x_cache_header_match_object.group(1)
            if hits.isdigit():
              # Cached content with a positive hit value: the backend
              # must NOT be touched, and the url needs no second pass
              x_varnish_reference_list.append(
                (x_varnish_reference, False, url))
              logging.debug('will be discarded:%r' % url)
              discarded_url_list.append(url)
            else:
              x_varnish_reference_list.append(
                (x_varnish_reference, True, url))
        # validate the response headers against the configured policy
        for header, reference_value in self._getHeaderPolicyList(
            url, fetched_data):
          self._validateHeader(url, header, reference_value, fetched_data)
    return x_varnish_reference_list, discarded_url_list[:]

  def start(self, prohibited_file_name_list=None,
            prohibited_folder_name_list=None):
    """Run the full test suite (two crawl passes), then build and, if
    an email address is configured, send the error report.

    Returns a short result string (either the report itself or a
    confirmation that the email was sent).
    """
    logging.info('#' * 52)
    logging.info('date:%r' % (datetime.now().isoformat()))
    logging.info('#' * 52)
    self._initFolder()
    logging.info('First pass:%r' % self.root_url)
    varnishlog_reading_process = self._runVarnishLog()
    wget_log_file = self._runSpider()
    varnishlog_data = self._readVarnishLog(varnishlog_reading_process)
    x_varnish_reference_list, discarded_url_list = \
      self._parseWgetLogs(wget_log_file)
    self._readVarnishLogAndGetIsBackendTouched(varnishlog_data,
                                               x_varnish_reference_list)
    logging.info('End of First pass\n')
    for discarded_url in discarded_url_list:
      logging.debug(discarded_url)
    self._initFolder()
    logging.info('Second pass:%r' % self.root_url)
    varnishlog_reading_process = self._runVarnishLog()
    wget_log_file = self._runSpider()
    varnishlog_data = self._readVarnishLog(varnishlog_reading_process)
    x_varnish_reference_list, discarded_url_list = \
      self._parseWgetLogs(wget_log_file,
                          discarded_url_list=discarded_url_list,
                          prohibited_file_name_list=prohibited_file_name_list,
                          prohibited_folder_name_list=prohibited_folder_name_list)
    self._readVarnishLogAndGetIsBackendTouched(varnishlog_data,
                                               x_varnish_reference_list)
    logging.info('End of second pass\n')
    if self.report_dict:
      report_message_list = ['*Errors*:']
      for url, message_list in self.report_dict.items():
        # keep only the first occurrence of each message for this url
        unique_message_list = []
        for message in message_list:
          if message not in unique_message_list:
            unique_message_list.append(message)
        report_message_list.append('\n')
        report_message_list.append(url)
        report_message_list.extend(['\t%s' % message
                                    for message in unique_message_list])
      report_message = '\n'.join(report_message_list)
      # renamed from `signal` to avoid shadowing the signal module
      status_word = 'PROBLEM'
    else:
      report_message = 'No errors'
      status_word = 'OK'
    subject = '%r:HTTP Cache checker results for %s' % (status_word,
                                                        self.root_url)
    if self.email_address:
      import smtplib
      message = 'Subject: %s\nFrom: %s\nTo: %s\n\n%s' % \
          (subject, self.email_address, self.email_address, report_message)
      server = smtplib.SMTP(self.smtp_host)
      server.sendmail(self.email_address, self.email_address, message)
      server.quit()
      return 'Email sent to %s' % self.email_address
    else:
      return subject + '\n' + report_message
543
544
545 from optparse import OptionParser
546 import ConfigParser
547
548 def _formatConfiguration(configuration):
549 """ format the configuration"""
550 if configuration in ('True', 'true', 'yes'):
551 return True
552 return configuration.splitlines()
553
554 def web_checker_utility():
555 usage = "usage: %prog [options] config_path"
556 parser = OptionParser(usage=usage)
557 parser.add_option('-o', '--output_file',
558 dest='output_file')
559
560 (options, args) = parser.parse_args()
561 if len(args) != 1 :
562 print parser.print_help()
563 parser.error('incorrect number of arguments')
564 config_path = args[0]
565
566 config = ConfigParser.RawConfigParser()
567 config.read(config_path)
568 working_directory = config.get('web_checker', 'working_directory')
569 url = config.get('web_checker', 'url')
570 varnishlog_binary_path = config.get('web_checker', 'varnishlog_binary_path')
571 wget_binary_path = 'wget'
572 if config.has_option('web_checker', 'wget_binary_path'):
573 wget_binary_path = config.get('web_checker', 'wget_binary_path')
574 email_address = config.get('web_checker', 'email_address')
575 smtp_host = config.get('web_checker', 'smtp_host')
576 debug_level = config.get('web_checker', 'debug_level')
577 file_log_path = 'web_checker.log'
578 if config.has_option('web_checker', 'file_log_path'):
579 file_log_path = config.get('web_checker', 'file_log_path')
580 header_list = {}
581 for header, configuration in config.items('header_list'):
582 if header in config.defaults().keys():
583 # defaults are shared for all sections.
584 # so discard them from header_list
585 continue
586 value = _formatConfiguration(configuration)
587 header_list[header] = value
588 conditional_header_dict = {}
589 no_header_dict = {}
590 for section in config.sections():
591 item_list = config.items(section)
592 if re.compile("^header\s.*").match(section) is not None:
593 conditional_header_dict.setdefault(section, []).extend(item_list)
594 if re.compile("^no_header\s.*").match(section) is not None:
595 no_header_dict.setdefault(section, []).extend(item_list)
596 if config.has_section('erp5_extension_list'):
597 prohibited_file_name_list = config.get('erp5_extension_list',
598 'prohibited_file_name_list').splitlines()
599 prohibited_folder_name_list = config.get('erp5_extension_list',
600 'prohibited_folder_name_list').splitlines()
601 else:
602 prohibited_file_name_list = prohibited_folder_name_list = []
603 instance = HTTPCacheCheckerTestSuite(url,
604 working_directory,
605 varnishlog_binary_path,
606 wget_binary_path,
607 header_list,
608 email_address,
609 smtp_host,
610 debug_level,
611 file_log_path,
612 conditional_header_dict,
613 no_header_dict)
614
615 result = instance.start(prohibited_file_name_list=prohibited_file_name_list,
616 prohibited_folder_name_list=prohibited_folder_name_list)
617 if options.output_file:
618 file_object = open(options.output_file, 'w')
619 file_object.write(result)
620 file_object.close()
621 else:
622 print result
623
624