forked from lining0806/PythonSpiderNotes
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdb_reporting_plugin.py
More file actions
executable file
·139 lines (123 loc) · 5.42 KB
/
db_reporting_plugin.py
File metadata and controls
executable file
·139 lines (123 loc) · 5.42 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
"""
This is the Database test reporting plugin for
recording all test run data in the database.
"""
import getpass
import time
import uuid
from optparse import SUPPRESS_HELP
from nose.plugins import Plugin
from nose.exc import SkipTest
from seleniumbase.core.application_manager import ApplicationManager
from seleniumbase.core.testcase_manager import ExecutionQueryPayload
from seleniumbase.core.testcase_manager import TestcaseDataPayload
from seleniumbase.core.testcase_manager import TestcaseManager
from seleniumbase.fixtures import constants
from seleniumbase.fixtures import errors
class DBReporting(Plugin):
    """
    Nose plugin that records test-run data in the database.

    Lifecycle:
      * begin()     - inserts one execution row for the whole run.
      * startTest() - inserts one testcase row (state NOTRUN) per test and
                      exposes its guid to other plugins via ``test.testcase_guid``.
      * add*/handleError() - update the testcase row with the final state.
      * finalize()  - updates the execution row with the total runtime.
    """
    name = 'db_reporting'  # Usage: --with-db_reporting

    def __init__(self):
        Plugin.__init__(self)
        # One guid identifies the whole run; a fresh guid is made per test.
        self.execution_guid = str(uuid.uuid4())
        self.testcase_guid = None
        self.execution_start_time = 0  # epoch milliseconds
        self.case_start_time = 0       # epoch milliseconds
        self.application = None
        self.testcase_manager = None
        self.error_handled = False

    def options(self, parser, env):
        """Register the --database_environment command-line option."""
        super(DBReporting, self).options(parser, env=env)
        parser.add_option('--database_environment', action='store',
                          dest='database_env',
                          choices=('prod', 'qa', 'test'),
                          default='test',
                          help=SUPPRESS_HELP)

    def configure(self, options, conf):
        """Store parsed options and build the TestcaseManager."""
        super(DBReporting, self).configure(options, conf)
        # NOTE(review): this shadows the options() method on the instance.
        # Kept as-is because external code may read ``plugin.options``.
        self.options = options
        self.testcase_manager = TestcaseManager(self.options.database_env)

    def begin(self):
        """At the start of the run, record the test
        execution information in the database."""
        exec_payload = ExecutionQueryPayload()
        exec_payload.execution_start_time = int(time.time() * 1000)
        self.execution_start_time = exec_payload.execution_start_time
        exec_payload.guid = self.execution_guid
        exec_payload.username = getpass.getuser()
        self.testcase_manager.insert_execution_data(exec_payload)

    def startTest(self, test):
        """At the start of the test, insert the testcase details (state
        NOTRUN) and remember the start time for runtime computation."""
        data_payload = TestcaseDataPayload()
        self.testcase_guid = str(uuid.uuid4())
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        if hasattr(test, "browser"):
            data_payload.browser = test.browser
        else:
            data_payload.browser = "N/A"
        data_payload.testcaseAddress = test.id()
        # Split once: the application string is "<env>.<start_time>...".
        app_parts = ApplicationManager.generate_application_string(
            test).split('.')
        data_payload.env = app_parts[0]
        data_payload.start_time = app_parts[1]
        data_payload.state = constants.State.NOTRUN
        self.testcase_manager.insert_testcase_data(data_payload)
        self.case_start_time = int(time.time() * 1000)
        # Make the testcase guid available to other plugins
        test.testcase_guid = self.testcase_guid

    def finalize(self, result):
        """At the end of the run, update the DB execution row
        with the total execution time."""
        runtime = int(time.time() * 1000) - self.execution_start_time
        self.testcase_manager.update_execution_data(self.execution_guid,
                                                    runtime)

    def addSuccess(self, test, capt):
        """
        After test completion, record the testcase run information.
        """
        self.__insert_test_result(constants.State.PASS, test)

    def addError(self, test, err, capt=None):
        """
        After a test error, record the testcase run information.
        """
        self.__insert_test_result(constants.State.ERROR, test, err)

    def handleError(self, test, err, capt=None):
        """
        After a test error, record the testcase run information.
        "Error" also encompasses any states other than Pass or Fail, so we
        check for those first.

        Raises SkipTest so nose reports the test as skipped rather than
        errored for Blocked/Deprecated/Skipped states.
        """
        # BUGFIX: the original had unreachable ``return True`` statements
        # after each ``raise SkipTest(...)``; they have been removed.
        if err[0] == errors.BlockedTest:
            self.__insert_test_result(constants.State.BLOCKED, test, err)
            self.error_handled = True
            raise SkipTest(err[1])
        elif err[0] == errors.DeprecatedTest:
            self.__insert_test_result(constants.State.DEPRECATED, test, err)
            self.error_handled = True
            raise SkipTest(err[1])
        elif err[0] == errors.SkipTest:
            self.__insert_test_result(constants.State.SKIP, test, err)
            self.error_handled = True
            raise SkipTest(err[1])

    def addFailure(self, test, err, capt=None, tbinfo=None):
        """
        After a test failure, record the testcase run information.
        """
        self.__insert_test_result(constants.State.FAILURE, test, err)

    def __insert_test_result(self, state, test, err=None):
        """Update the testcase row with the final state, runtime, and (on
        error) the exception message, truncated before any captured-logging
        section that nose appends."""
        data_payload = TestcaseDataPayload()
        data_payload.runtime = int(time.time() * 1000) - self.case_start_time
        data_payload.guid = self.testcase_guid
        data_payload.execution_guid = self.execution_guid
        data_payload.state = state
        if err is not None:
            data_payload.message = str(err[1]).split(
                '''-------------------- >> '''
                '''begin captured logging'''
                ''' << --------------------''', 1)[0]
        self.testcase_manager.update_testcase_data(data_payload)