1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192 |
#!/usr/bin/python3
# coding: utf-8
# {{{ Installation notes
# To get it working (SNI support) on Debian wheezy:
# - apt-get install python3-openssl python3-minimal
# - apt-get install -t wheezy-backports python3-requests python3-urllib3
# }}}
- # {{{ Imports
- import sys
- import re
- import argparse
- import requests
- import traceback
- import time
- # }}}
# {{{ Constants
# Program identity strings, consumed by argparse in main() for the
# --help prolog and the --version output.
_NAME = "web-checker"
_DESC = "tool to check web page content"
_VERSION = "0.1"
# }}}
# {{{ WebPage
class WebPage(object):
    """Fetch a single URL and validate its HTTP status and body content.

    Configured from an argparse.Namespace carrying at least: url, timeout,
    status, regexp and no_ssl_verify (see main()).
    """

    def __init__(self, options):
        self._options = options
        # Duration of the last check() in seconds; 0 until check() ran.
        self._elapsed = 0

    def elapsed(self):
        """Return the last check() duration formatted with ms precision."""
        return "%.3f" % (self._elapsed)

    def check(self):
        """Fetch the page and validate it; return None on success.

        Raises:
            ValueError: the configured content regexp does not compile.
            AssertionError: unexpected HTTP status, or content not matching.
            requests.RequestException: timeout / connection / SSL failure.
        """
        start_time = time.time()
        r = requests.get(self._options.url,
                         timeout=self._options.timeout,
                         verify=not self._options.no_ssl_verify)
        # Only the transfer itself is timed, not the validation below.
        self._elapsed = time.time() - start_time
        if r.status_code != self._options.status:
            raise AssertionError('Wrong HTTP code returned: %d != %d' % (r.status_code, self._options.status))
        if self._options.regexp is not None:
            try:
                exp = re.compile(self._options.regexp)
            except re.error:
                # Narrow except: the previous bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit.
                raise ValueError('Content regex is invalid: %s' % (self._options.regexp))
            matched = False
            text = getattr(r, 'text', None)
            if text is not None and exp.search(text) is not None:
                matched = True
            else:
                content = getattr(r, 'content', None)
                if content is not None:
                    # r.content is bytes; a str pattern applied to bytes
                    # raises TypeError, so decode before matching.
                    if isinstance(content, bytes):
                        content = content.decode(r.encoding or 'utf-8', 'replace')
                    if exp.search(content) is not None:
                        matched = True
            if not matched:
                raise AssertionError('Web page content do not match regexp: %s' % (self._options.regexp))
# }}}
# {{{ main
def main():
    """CLI entry point: check one URL and print a parseable result on stdout.

    On success prints the elapsed time (with -E) or 0; on failure prints an
    empty string (with -E) or 1. Exit status is 0 unless --debug is set and
    the check failed, in which case the traceback is dumped to stderr and
    the process exits with -1.
    """
    parser = argparse.ArgumentParser(prog=_NAME, description=_DESC)
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + _VERSION)
    parser.add_argument('-d', '--debug', dest='debug', default=False, action='store_true',
                        help='Print additional debug informations: traceback, etc. (default: no)')
    parser.add_argument('-t', '--timeout', dest='timeout', default=5, type=int, help='Timeout in seconds')
    parser.add_argument('-s', '--status', dest='status', default=200, type=int, help='Expected HTTP status')
    parser.add_argument('-r', '--regexp', dest='regexp', default=None, help='Regexp matching for web page content')
    parser.add_argument('-n', '--no-ssl-verify', dest='no_ssl_verify', default=False, action='store_true',
                        help='Disable SSL certificate verification')
    parser.add_argument('-E', '--elapsed', dest='elapsed', default=False, action='store_true',
                        help='Return elapsed time instead of status')
    parser.add_argument('url')
    options = parser.parse_args()

    result = None
    try:
        w = WebPage(options)
        w.check()
        result = w.elapsed() if options.elapsed else 0
    except Exception as e:
        result = '' if options.elapsed else 1
        if options.debug:
            # Debug output must not pollute stdout (which carries only the
            # machine-parseable result), so send it to stderr like the
            # traceback below.
            print('Exception raised: %s' % (e), file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            sys.exit(-1)
    print(result)
# }}}
# {{{ __main__
# Run the checker only when executed as a script, not on import.
if __name__ == "__main__":
    main()
# }}}
- # vim: foldmethod=marker foldlevel=0 foldenable
|