Blazemeter / taurus
Showing 11 of 45 files from the diff.
Other files ignored by Codecov
Dockerfile has changed.
setup.py has changed.

@@ -34,7 +34,7 @@
Loading
34 34
        return self._get_val("iterations", self.ITER_SEL, raw=raw)
35 35
36 36
    def get_ramp_up(self, raw=False):
37 -
        return self._get_val("ramp-up", self.RAMP_UP_SEL, default=1, raw=raw)
37 +
        return self._get_val("ramp-up", self.RAMP_UP_SEL, default=0, raw=raw)
38 38
39 39
    def get_concurrency(self, raw=False):
40 40
        raw_concurrency = self._get_val("concurrency", self.CONCURRENCY_SEL, default=1, raw=True)

@@ -0,0 +1,19 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
from bzt.modules.blazemeter.blazemeter_reporter import BlazeMeterUploader
19 +
from bzt.modules.blazemeter.cloud_provisioning import CloudProvisioning, ServiceStubScreenshoter, ServiceStubCaptureHAR

@@ -0,0 +1,207 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
19 +
from bzt import TaurusConfigError
20 +
from bzt.bza import Test
21 +
from bzt.modules.blazemeter.cloud_test import CloudTaurusTest
22 +
from bzt.modules.blazemeter.net_utils import parse_blazemeter_test_link
23 +
24 +
25 +
class ProjectFinder(object):
    """
    Resolves BlazeMeter account, workspace, project and test entities
    from Taurus configuration (module settings and execution parameters).

    :type user: bzt.bza.User
    """

    def __init__(self, parameters, settings, user, workspaces, parent_log):
        super(ProjectFinder, self).__init__()
        self.default_test_name = "Taurus Test"
        self.parameters = parameters
        self.settings = settings
        self.log = parent_log.getChild(self.__class__.__name__)
        self.user = user
        self.workspaces = workspaces
        self.test_type = None

    def _find_project(self, proj_name):
        """
        Find a project by numeric ID or by name. Returns None when no name is
        given, or when lookup by name finds nothing.

        :rtype: bzt.bza.Project
        """
        if isinstance(proj_name, (int, float)):  # project id
            proj_id = int(proj_name)
            self.log.debug("Treating project name as ID: %s", proj_id)
            project = self.workspaces.projects(ident=proj_id).first()
            if not project:
                raise TaurusConfigError("BlazeMeter project not found by ID: %s" % proj_id)
        elif proj_name:
            project = self.workspaces.projects(name=proj_name).first()
        else:
            project = None

        return project

    def _ws_proj_switch(self, project):
        # narrow the search scope to the project when it's known,
        # otherwise search across all workspaces
        if project:
            return project
        else:
            return self.workspaces

    def resolve_external_test(self):
        """Find or create an 'external' type test, creating its project if needed."""
        proj_name = self.parameters.get("project", self.settings.get("project"))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))

        project = self._find_project(proj_name)
        if not project and proj_name:
            project = self.workspaces.first().create_project(proj_name)

        test = self._ws_proj_switch(project).tests(name=test_name, test_type='external').first()

        if not test:
            if not project:
                # fall back to the user's default project, creating one as a last resort
                info = self.user.fetch()
                project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
                if not project:
                    project = self.workspaces.first().create_project("Taurus Tests Project")

            test = project.create_test(test_name, {"type": "external"})

        return test

    def resolve_account(self, account_name):
        """Resolve account by ID or name; fall back to the user's default account."""
        account = None

        if isinstance(account_name, (int, float)):  # TODO: what if it's string "123"?
            acc_id = int(account_name)
            self.log.debug("Treating account name as ID: %s", acc_id)
            account = self.user.accounts(ident=acc_id).first()
            if not account:
                raise TaurusConfigError("BlazeMeter account not found by ID: %s" % acc_id)
        elif account_name:
            account = self.user.accounts(name=account_name).first()
            if not account:
                raise TaurusConfigError("BlazeMeter account not found by name: %s" % account_name)

        if account:
            return account

        self.user.fetch()
        account = self.user.accounts(ident=self.user['defaultProject']['accountId']).first()
        self.log.debug("Using default account: %s", account)
        return account

    def resolve_workspace(self, account, workspace_name):
        """Resolve workspace by ID or name within account; fall back to default workspace."""
        workspace = None

        if isinstance(workspace_name, (int, float)):  # TODO: what if it's string "123"?
            workspace_id = int(workspace_name)
            self.log.debug("Treating workspace name as ID: %s", workspace_id)
            workspace = account.workspaces(ident=workspace_id).first()
            if not workspace:
                raise TaurusConfigError("BlazeMeter workspace not found by ID: %s" % workspace_id)
        elif workspace_name is not None:
            workspace = account.workspaces(name=workspace_name).first()
            if not workspace:
                raise TaurusConfigError("BlazeMeter workspace not found: %s" % workspace_name)

        if workspace is None:
            workspace = account.workspaces(ident=self.user['defaultProject']['workspaceId']).first()
            # fixed: use lazy %-args for logging instead of eager string formatting
            self.log.debug("Using first workspace: %s", workspace)

        return workspace

    def resolve_project(self, workspace, project_name):
        """Resolve project by ID or name within workspace, creating it when missing."""
        if isinstance(project_name, (int, float)):  # project id
            project_id = int(project_name)
            self.log.debug("Treating project name as ID: %s", project_id)
            project = workspace.projects(ident=project_id).first()
            if not project:
                raise TaurusConfigError("BlazeMeter project not found by ID: %s" % project_id)
        elif project_name:
            project = workspace.projects(name=project_name).first()
        else:
            project = None

        if not project:
            project = self._create_project_or_use_default(workspace, project_name)

        return project

    def _create_project_or_use_default(self, workspace, proj_name):
        # create the named project, or locate/create the user's default project
        if proj_name:
            return workspace.create_project(proj_name)
        else:
            info = self.user.fetch()
            self.log.debug("Looking for default project: %s", info['defaultProject']['id'])
            project = self.workspaces.projects(ident=info['defaultProject']['id']).first()
            if not project:
                project = workspace.create_project("Taurus Tests Project")
            return project

    def resolve_test(self, project, test_name, test_type):
        """Resolve test by ID or name within project; returns None when not found by name."""
        is_int = isinstance(test_name, (int, float))
        is_digit = isinstance(test_name, str) and test_name.isdigit()

        if is_int or is_digit:
            test_id = int(test_name)
            # fixed: message previously said "project name" (copy-paste)
            self.log.debug("Treating test name as ID: %s", test_id)
            test = project.tests(ident=test_id, test_type=test_type).first()
            if not test:
                raise TaurusConfigError("BlazeMeter test not found by ID: %s" % test_id)
        elif test_name is not None:
            test = project.tests(name=test_name, test_type=test_type).first()
        else:
            test = None

        return test

    def get_test_router(self):
        """
        Build a cloud test router for the configured test, resolving it either
        from a BlazeMeter test URL or via account/workspace/project lookup.

        :raises TaurusConfigError: if an existing test must be launched but can't be found
        """
        default_location = self.settings.get("default-location", None)
        account_name = self.parameters.get("account", self.settings.get("account", None))
        workspace_name = self.parameters.get("workspace", self.settings.get("workspace", None))
        project_name = self.parameters.get("project", self.settings.get("project"))
        test_name = self.parameters.get("test", self.settings.get("test", self.default_test_name))
        launch_existing_test = self.settings.get("launch-existing-test", False)

        # if we're to launch existing test - don't use test_type filter (look for any type)
        filter_test_type = None if launch_existing_test else self.test_type

        test_spec = parse_blazemeter_test_link(test_name)
        self.log.debug("Parsed test link: %s", test_spec)
        if test_spec is not None:
            account, workspace, project, test = self.user.test_by_ids(test_spec.account_id, test_spec.workspace_id,
                                                                      test_spec.project_id, test_spec.test_id,
                                                                      test_type=filter_test_type)
            if not test:
                # fixed: message arg was passed as a lazy-logging-style tuple,
                # which the exception constructor doesn't interpolate
                raise TaurusConfigError("Test not found: %s" % test_name)
            self.log.info("Found test by link: %s", test_name)
        else:
            account = self.resolve_account(account_name)
            workspace = self.resolve_workspace(account, workspace_name)
            project = self.resolve_project(workspace, project_name)
            test = self.resolve_test(project, test_name, test_type=filter_test_type)

        if not isinstance(test, Test) and launch_existing_test:
            # test wasn't found, and we're not allowed to create a new one
            raise TaurusConfigError("Can't find test for launching: %r" % test_name)

        test_class = CloudTaurusTest  # the only supported router type

        router = test_class(self.user, test, project, test_name, default_location, launch_existing_test, self.log)
        router._workspaces = self.workspaces
        router.cloud_mode = self.settings.get("cloud-mode", None)
        router.dedicated_ips = self.settings.get("dedicated-ips", False)
        router.test_type = self.test_type
        router.send_report_email = self.settings.get("send-report-email", False)
        return router

@@ -276,7 +276,7 @@
Loading
276 276
277 277
class ChromeDriver(RequiredTool):
278 278
    DOWNLOAD_LINK = "https://chromedriver.storage.googleapis.com/{version}/chromedriver_{arch}.zip"
279 -
    VERSION = "89.0.4389.23"
279 +
    VERSION = "90.0.4430.24"
280 280
281 281
    def __init__(self, config=None, **kwargs):
282 282
        settings = config or {}
@@ -326,7 +326,7 @@
Loading
326 326
class GeckoDriver(RequiredTool):
327 327
    DOWNLOAD_LINK = \
328 328
        "https://github.com/mozilla/geckodriver/releases/download/v{version}/geckodriver-v{version}-{arch}.{ext}"
329 -
    VERSION = "0.23.0"
329 +
    VERSION = "0.29.1"
330 330
331 331
    def __init__(self, config=None, **kwargs):
332 332
        settings = config or {}

@@ -0,0 +1,230 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
from abc import abstractmethod
19 +
20 +
import yaml
21 +
22 +
from bzt import TaurusConfigError
23 +
from bzt.bza import Workspace
24 +
from bzt.utils import iteritems, BetterDict
25 +
from bzt.modules.blazemeter.const import LOC
26 +
27 +
# BlazeMeter test "configuration type" identifiers used when creating/reusing tests
TAURUS_TEST_TYPE = "taurus"
FUNC_API_TEST_TYPE = "functionalApi"
FUNC_GUI_TEST_TYPE = "functionalGui"
30 +
31 +
32 +
class BaseCloudTest(object):
    """
    Common interface for cloud test routers. Subclasses implement the
    abstract hooks; this base only holds shared state and status polling.

    :type _user: bzt.bza.User
    :type _project: bzt.bza.Project
    :type _test: bzt.bza.Test
    :type master: bzt.bza.Master
    :type cloud_mode: str
    """

    def __init__(self, user, test, project, test_name, default_location, launch_existing_test, parent_log):
        self.log = parent_log.getChild(self.__class__.__name__)

        # remote entities this test is bound to
        self._user = user
        self._test = test
        self._project = project
        self._test_name = test_name

        # launch configuration (some fields are filled in later by the caller)
        self.default_location = default_location
        self.launch_existing_test = launch_existing_test
        self.cloud_mode = None
        self.dedicated_ips = False
        self.test_type = None
        self.send_report_email = False

        # runtime state
        self.master = None
        self._workspaces = None
        self._last_status = None
        self._sessions = None
        self._started = False

    @abstractmethod
    def prepare_locations(self, executors, engine_config):
        pass

    @abstractmethod
    def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
        pass

    @abstractmethod
    def launch_test(self):
        """launch cloud test"""
        pass

    @abstractmethod
    def sanitize_test(self):
        """Sanitize cloud test"""
        pass

    @abstractmethod
    def start_if_ready(self):
        """start cloud test if all engines are ready"""
        pass

    @abstractmethod
    def get_test_status_text(self):
        pass

    @abstractmethod
    def stop_test(self):
        pass

    def get_master_status(self):
        """Poll the master for its current status and cache it."""
        self._last_status = self.master.get_status()
        return self._last_status
94 +
95 +
class CloudTaurusTest(BaseCloudTest):
    """Cloud test router that runs a Taurus configuration on BlazeMeter's side."""

    def prepare_locations(self, executors, engine_config):
        """Validate requested execution locations against the workspace, assigning
        a default location to executors that don't specify one.

        :param executors: executors whose `execution` configs are checked/updated in place
        :param engine_config: full engine config (may carry a top-level locations section)
        """
        available_locations = {}
        is_taurus4 = True
        workspace = Workspace(self._project, {'id': self._project['workspaceId']})
        for loc in workspace.locations(include_private=is_taurus4):
            available_locations[loc['id']] = loc

        # NOTE(review): is_taurus4 is hard-coded True, so this branch is dead code here
        if LOC in engine_config and not is_taurus4:
            self.log.warning("Deprecated test API doesn't support global locations")

        for executor in executors:
            if LOC in executor.execution \
                    and isinstance(executor.execution[LOC], dict):
                # executor-level locations take precedence over engine-level ones
                exec_locations = executor.execution[LOC]
                self._check_locations(exec_locations, available_locations)
            elif LOC in engine_config and is_taurus4:
                self._check_locations(engine_config[LOC], available_locations)
            else:
                default_loc = self._get_default_location(available_locations)
                executor.execution[LOC] = BetterDict.from_dict({default_loc: 1})

            executor.get_load()  # we need it to resolve load settings into full form

    def _get_default_location(self, available_locations):
        """Pick a location: configured default, else first non-functional sandbox,
        else first available location; raise if none exist.

        :raises TaurusConfigError: when no locations are available at all
        """
        if self.default_location and self.default_location in available_locations:
            return self.default_location

        self.log.debug("Default location %s not found", self.default_location)

        # sorted() makes the choice deterministic across runs
        for location_id in sorted(available_locations):
            location = available_locations[location_id]
            if location['sandbox'] and not location.get('purposes', {}).get('functional', False):
                return location_id

        if available_locations:
            location_id = sorted(available_locations.keys())[0]
            self.log.warning("Using first location as default: %s", location_id)
            return location_id

        self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
        raise TaurusConfigError("No sandbox or default location available, please specify locations manually")

    def _check_locations(self, locations, available_locations):
        """Reject any requested location the workspace doesn't offer.

        :raises TaurusConfigError: on the first unknown location
        """
        for location in locations:
            if location not in available_locations:
                self.log.warning("List of supported locations for you is: %s", sorted(available_locations.keys()))
                raise TaurusConfigError("Invalid location requested: %s" % location)

    def resolve_test(self, taurus_config, rfiles, delete_old_files=False):
        """Find or create the remote test entity, then upload the serialized
        config and resource files to it. No-op when launching an existing test.
        """
        if self.launch_existing_test:
            return

        if self._test is not None:
            # a test of a different type can't be reused; force re-creation
            test_type = self._test.get("configuration").get("type")
            if test_type != self.test_type:
                self.log.debug("Can't reuse test type %r as Taurus test, will create new one", test_type)
                self._test = None

        if self._test is None:
            test_config = {
                "type": self.test_type,
                "plugins": {
                    "taurus": {
                        "filename": ""  # without this line it does not work
                    }
                }
            }

            self._test = self._project.create_test(self._test_name, test_config)

        if delete_old_files:
            self._test.delete_files()

        taurus_config = yaml.safe_dump(taurus_config, default_flow_style=False, explicit_start=True, canonical=False)
        self._test.upload_files(taurus_config, rfiles)
        self._test.update_props({'configuration': {'executionType': self.cloud_mode}})
        self._test.update_props({
            "shouldSendReportEmail": self.send_report_email
        })

    def sanitize_test(self):
        """Reset remote test properties that could conflict with a fresh Taurus run."""
        self._test.update_props({'overrideExecutions': []})
        self._test.update_props({'configuration': {'scriptType': 'taurus'}})

    def launch_test(self):
        """Start the cloud test; return the URL of its master results page."""
        self.log.info("Initiating cloud test with %s ...", self._test.address)
        self.master = self._test.start(as_functional=self.test_type in (FUNC_API_TEST_TYPE, FUNC_GUI_TEST_TYPE))
        return self.master.address + '/app/#/masters/%s' % self.master['id']

    def start_if_ready(self):
        # the cloud-side master coordinates engine readiness itself
        self._started = True

    def stop_test(self):
        """Stop the running test: `stop()` once progress has reached 100,
        `terminate()` otherwise. No-op when no master exists yet.
        """
        if self.master:
            self.log.info("Ending cloud test...")
            if not self._last_status:
                self.get_master_status()

            if self._last_status["progress"] >= 100:
                self.master.stop()
            else:
                self.master.terminate()

    def get_test_status_text(self):
        """Render a text summary of agents per executor/scenario/location,
        or None when session info isn't available yet.
        """
        if not self._sessions:
            self._sessions = self.master.sessions()
            if not self._sessions:
                return

        mapping = BetterDict()  # dict(executor -> dict(scenario -> dict(location -> servers count)))
        for session in self._sessions:
            try:
                # session name format is "executor/scenario" — split and group
                name_split = [part.strip() for part in session['name'].split('/')]
                location = session['configuration']['location']
                count = session['configuration']['serversCount']
                ex_item = mapping.get(name_split[0], force_set=True)

                if len(name_split) > 1:
                    name = name_split[1]
                else:
                    name = "N/A"

                ex_item.get(name, force_set=True)[location] = count
            except KeyError:
                # session data incomplete — drop the cache so it's re-fetched next call
                self._sessions = None

        txt = "%s #%s\n" % (self._test['name'], self.master['id'])
        for executor, scenarios in iteritems(mapping):
            txt += " %s" % executor
            for scenario, locations in iteritems(scenarios):
                txt += " %s:\n" % scenario
                for location, count in iteritems(locations):
                    txt += "  Agents in %s: %s\n" % (location, count)

        return txt

@@ -21,7 +21,8 @@
Loading
21 21
from bzt.modules import ScenarioExecutor, FileLister, SelfDiagnosable
22 22
from bzt.modules.console import WidgetProvider, ExecutorWidget
23 23
from bzt.modules.aggregator import ResultsReader, ConsolidatingAggregator
24 -
from bzt.utils import RequiredTool, CALL_PROBLEMS, FileReader, shutdown_process
24 +
from bzt.utils import RequiredTool, CALL_PROBLEMS, FileReader, shutdown_process, get_full_path, is_windows, is_mac, \
25 +
    untar
25 26
26 27
27 28
class VegetaExecutor(ScenarioExecutor, FileLister, WidgetProvider, HavingInstallableTools, SelfDiagnosable):
@@ -41,8 +42,6 @@
Loading
41 42
        self.install_required_tools()
42 43
43 44
        self.script = self.get_script_path()
44 -
        print(self.script)
45 -
46 45
        if not self.script:
47 46
            requests = self.scenario.get_requests()
48 47
            if not requests:
@@ -72,7 +71,7 @@
Loading
72 71
            self.engine.aggregator.add_underling(self.reader)
73 72
74 73
    def startup(self):
75 -
        cmdline = [self.vegeta.tool_name, "attack", "-targets", self.script]
74 +
        cmdline = [self.vegeta.tool_path, "attack", "-targets", self.script]
76 75
        load = self.get_load()
77 76
78 77
        if load.throughput:
@@ -89,7 +88,7 @@
Loading
89 88
90 89
        self.process = self._execute(cmdline, stdout=PIPE, shell=False)
91 90
        with open(self.kpi_file, 'wb') as f:
92 -
            self._execute(["vegeta", "encode", "-to=csv"], stdin=self.process.stdout, stdout=f, shell=False)
91 +
            self._execute([self.vegeta.tool_path, "encode", "-to=csv"], stdin=self.process.stdout, stdout=f, shell=False)
93 92
94 93
    def get_widget(self):
95 94
        if not self.widget:
@@ -148,16 +147,26 @@
Loading
148 147
149 148
150 149
class Vegeta(RequiredTool):
150 +
    DOWNLOAD_LINK = \
151 +
        "https://github.com/tsenart/vegeta/releases/download/v{version}/vegeta_{version}_{platform}_amd64.tar.gz "
152 +
    VERSION = "12.8.4"
153 +
    LOCAL_PATH = "~/.bzt/vegeta-taurus/{version}/"
154 +
151 155
    def __init__(self, config=None, **kwargs):
152 156
        settings = config or {}
153 -
        tool_path = settings.get('path', 'vegeta')
154 -
155 -
        super(Vegeta, self).__init__(tool_path=tool_path, installable=False, **kwargs)
157 +
        version = settings.get("version", self.VERSION)
158 +
        self.tool_path = get_full_path(settings.get("path", self.LOCAL_PATH.format(version=version) + 'vegeta'))
159 +
        if not is_windows():
160 +
            platform = 'darwin' if is_mac() else 'linux'
161 +
            download_link = settings.get("download-link", self.DOWNLOAD_LINK).format(version=version, platform=platform)
162 +
        else:
163 +
            download_link = ''
164 +
        super(Vegeta, self).__init__(tool_path=self.tool_path, download_link=download_link, version=version, **kwargs)
156 165
157 166
    def check_if_installed(self):
158 167
        self.log.debug('Checking Vegeta Framework: %s' % self.tool_path)
159 168
        try:
160 -
            out, err = self.call(['vegeta', '-version'])
169 +
            out, err = self.call([self.tool_path, '-version'])
161 170
        except CALL_PROBLEMS as exc:
162 171
            self.log.warning("%s check failed: %s", self.tool_name, exc)
163 172
            return False
@@ -166,3 +175,23 @@
Loading
166 175
            out += err
167 176
        self.log.debug("Vegeta output: %s", out)
168 177
        return True
178 +
179 +
    def install(self):
180 +
        if is_windows():
181 +
            raise ToolError("Unable to install Vegeta on Windows! Manual installation required.")
182 +
183 +
        dest = get_full_path(self.tool_path, step_up=1)
184 +
        if not os.path.exists(dest):
185 +
            os.makedirs(dest)
186 +
187 +
        self.log.info("Will install %s into %s", self.tool_name, dest)
188 +
        vegeta_dist = self._download(use_link=True)
189 +
190 +
        self.log.info("Untaring %s", vegeta_dist)
191 +
        untar(vegeta_dist, dest, rel_path='vegeta')
192 +
        os.remove(vegeta_dist)
193 +
        os.chmod(get_full_path(self.tool_path), 0o755)
194 +
        self.log.info("Installed Vegeta successfully")
195 +
196 +
        if not self.check_if_installed():
197 +
            raise ToolError("Unable to run %s after installation!" % self.tool_name)

@@ -0,0 +1,835 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
import copy
19 +
import logging
20 +
import os
21 +
import sys
22 +
import time
23 +
import traceback
24 +
import zipfile
25 +
from collections import Counter
26 +
from time import sleep
27 +
from urllib.error import URLError
28 +
29 +
import requests
30 +
from terminaltables import SingleTable, AsciiTable
31 +
from urwid import Pile, Text
32 +
33 +
from bzt import AutomatedShutdown, TaurusConfigError, TaurusException, TaurusNetworkError, NormalShutdown
34 +
from bzt.bza import User, Session, BZA_TEST_DATA_RECEIVED, ENDED
35 +
from bzt.engine import Reporter, Provisioning, Configuration, Service, SETTINGS, ScenarioExecutor, EXEC
36 +
from bzt.modules.aggregator import DataPoint, KPISet, ResultsProvider
37 +
from bzt.modules.console import WidgetProvider, PrioritizedWidget
38 +
from bzt.modules.functional import FunctionalResultsReader, FunctionalSample
39 +
from bzt.modules.monitoring import LocalClient
40 +
from bzt.modules.selenium import SeleniumExecutor
41 +
from bzt.modules.services import Unpacker
42 +
from bzt.requests_model import has_variable_pattern
43 +
from bzt.utils import iteritems, open_browser, BetterDict, ExceptionalDownloader, ProgressBarContext
44 +
from bzt.utils import to_json, dehumanize_time, get_full_path, get_files_recursive, replace_in_config
45 +
from bzt.modules.blazemeter.net_utils import get_with_retry
46 +
from bzt.modules.blazemeter.blazemeter_reporter import BlazeMeterUploader
47 +
from bzt.modules.blazemeter.cloud_test import FUNC_API_TEST_TYPE, FUNC_GUI_TEST_TYPE, TAURUS_TEST_TYPE
48 +
from bzt.modules.blazemeter.project_finder import ProjectFinder
49 +
from bzt.modules.blazemeter.const import DEDICATED_IPS
50 +
51 +
52 +
# Config sections/keys that are local-only (tool paths, tokens, addresses,
# CLI state) and therefore should not travel with the config sent to the cloud.
# NOTE(review): the filtering semantics (whether True prunes the whole subtree)
# depend on the consumer of this dict, which is outside this chunk — confirm there.
CLOUD_CONFIG_BLACK_LIST = {
    "settings": {
        "artifacts-dir": True,
        "aggregator": True,
        "proxy": True,
        "check-updates": True
    },
    "included-configs": True,
    "cli": True,
    "cli-aliases": True,
    "install-id": True,
    "version": True,
    "modules": {
        "jmeter": {
            "path": True,
            "protocol-handlers": True
        },
        "ab": {
            "path": True
        },
        "gatling": {
            "path": True
        },
        "grinder": {
            "path": True
        },
        "junit": {
            "path": True
        },
        "molotov": {
            "path": True
        },
        "siege": {
            "path": True
        },
        "testng": {
            "path": True
        },
        "tsung": {
            "path": True
        },
        "console": {
            "disable": True,
        },
        "blazemeter": {
            "address": True,
            "data-address": True,
            "token": True,
        },
        "cloud": {
            "address": True,
            "data-address": True,
            "token": True,
        },
    },
    "provisioning": True,

}
110 +
111 +
112 +
class MasterProvisioning(Provisioning):
    """Provisioning base that collects and packs resource files for a remote master."""

    def get_rfiles(self):
        """Collect resource files from all executors.

        :return: deduplicated list of resource file paths (variable-pattern
            paths excluded)
        :raises TaurusConfigError: when a file is missing on disk, or is not
            referenced anywhere in the config (it could not be re-linked on
            the cloud side)
        """
        rfiles = []
        additional_files = []
        for executor in self.executors:
            executor_rfiles = executor.get_resource_files()
            # serialized config acts as a haystack to verify each resource
            # file is actually referenced somewhere in the config
            config = to_json(self.engine.config.get('execution'))
            config += to_json(self.engine.config.get('scenarios'))
            config += to_json(executor.settings)
            for rfile in executor_rfiles:
                if has_variable_pattern(rfile):
                    # variable-based paths get resolved at run time — skip
                    continue

                if not os.path.exists(self.engine.find_file(rfile)):
                    raise TaurusConfigError("%s: resource file '%s' not found" % (executor, rfile))
                if to_json(rfile) not in config:  # TODO: might be check is needed to improve
                    additional_files.append(rfile)
            rfiles += executor_rfiles

        if additional_files:
            raise TaurusConfigError("Following files can't be handled in cloud: %s" % additional_files)

        rfiles = list(set(rfiles))
        rfiles = [x for x in rfiles if not has_variable_pattern(x)]
        self.log.debug("All resource files are: %s", rfiles)
        return rfiles

    def _fix_filenames(self, old_names):
        """Flatten resource paths to base names and patch the config to match.

        Directories are zipped first (see __pack_dirs); returns the
        deduplicated list of full local paths to upload.

        :raises TaurusConfigError: when two different directories contribute
            the same base name (flattening would silently overwrite one)
        """
        # check for concurrent base names
        old_full_names = [self.engine.find_file(x) for x in old_names]
        rbases = [os.path.basename(get_full_path(rfile)) for rfile in old_full_names]
        rpaths = [get_full_path(rfile, step_up=1) for rfile in old_full_names]
        while rbases:
            base, path = rbases.pop(), rpaths.pop()
            if base in rbases:
                index = rbases.index(base)
                # same base name is fine only if it's literally the same dir
                if path != rpaths[index]:
                    msg = 'Resource "%s" occurs more than one time, rename to avoid data loss'
                    raise TaurusConfigError(msg % base)

        old_full_names = self.__pack_dirs(old_full_names)
        new_base_names = [os.path.basename(f) for f in old_full_names]
        self.log.debug('Replace file names in config: %s with %s', old_names, new_base_names)
        # rewrite references before dedup so every original mention is patched
        replace_in_config(self.engine.config, old_names, new_base_names, log=self.log)
        old_full_names = list(set(old_full_names))
        return old_full_names

    def __pack_dirs(self, source_list):
        """Zip every directory in source_list; plain files pass through unchanged.

        Registers an 'unpacker' service (run locally on the worker) for each
        produced archive so it gets expanded before the test starts.

        :return: list of files (originals plus produced .zip archives)
        """
        result_list = []  # files for upload
        packed_list = []  # files for unpacking

        for source in source_list:
            source = get_full_path(source)
            if os.path.isfile(source):
                result_list.append(source)
            else:  # source is dir
                self.log.debug("Compress directory '%s'", source)
                base_dir_name = os.path.basename(source)
                zip_name = self.engine.create_artifact(base_dir_name, '.zip')
                # keep archive entries relative to the directory's parent
                relative_prefix_len = len(os.path.dirname(source))
                with zipfile.ZipFile(zip_name, 'w') as zip_file:
                    for _file in get_files_recursive(source):
                        zip_file.write(_file, _file[relative_prefix_len:])
                result_list.append(zip_name)
                packed_list.append(base_dir_name + '.zip')

        if packed_list:
            services = self.engine.config.get(Service.SERV, [], force_set=True)
            unpacker = BetterDict.from_dict({'module': Unpacker.UNPACK, Unpacker.FILES: packed_list, 'run-at': 'local'})
            services.append(unpacker)

        return result_list
184 +
185 +
186 +
class CloudProvisioning(MasterProvisioning, WidgetProvider):
    """
    Provisioning that runs the test in the BlazeMeter cloud.

    :type user: bzt.bza.User
    :type router: BaseCloudTest
    :type _workspaces: bzt.bza.BZAObjectsList[bzt.bza.Workspace]
    """

    def __init__(self):
        super(CloudProvisioning, self).__init__()
        self.results_url = None  # report URL, set by startup()
        self.results_reader = None  # ResultsFromBZA or FunctionalBZAReader
        self.user = User()  # BZA API client, configured by configure_client()
        self.__last_master_status = None  # last seen status, to log only changes
        self.browser_open = 'start'  # when to open the report: 'start'/'end'/'both'
        self.widget = None  # lazily created by get_widget()
        self.detach = False  # if True, start the test and exit without waiting
        self.router = None  # cloud test router, created in prepare()
        self.test_ended = False
        self.check_interval = 5.0  # minimal seconds between cloud status polls
        self._last_check_time = None
        self.public_report = False
        self.report_name = None
        self._workspaces = []
        self.launch_existing_test = None  # resolved in prepare()
        self.disallow_empty_execution = False
211 +
212 +
    @staticmethod
    def merge_with_blazemeter_config(module):
        """Overlay module's own settings on top of the 'blazemeter' module settings.

        The given module's settings take precedence; does nothing when no
        'blazemeter' module is present in the base config.
        """
        configured_modules = module.engine.config.get('modules')
        if 'blazemeter' not in configured_modules:
            module.log.debug("Module 'blazemeter' wasn't found in base config")
            return

        merged = copy.deepcopy(module.engine.instantiate_module('blazemeter').settings)
        merged.update(module.settings)
        module.settings = merged
221 +
222 +
    def prepare(self):
        """Set up the cloud test: client, workspace, test entity, file upload.

        :raises TaurusNetworkError: when the account has no active workspaces
        :raises NormalShutdown: in 'dump-locations' mode after printing locations
        """
        reporting = self.engine.config.get(Reporter.REP)

        CloudProvisioning.merge_with_blazemeter_config(self)
        CloudProvisioning.configure_client(self)
        self._workspaces = self.user.accounts().workspaces()
        if not self._workspaces:
            raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")

        self.__dump_locations_if_needed()

        super(CloudProvisioning, self).prepare()
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        self.detach = self.settings.get("detach", self.detach)
        self.check_interval = dehumanize_time(self.settings.get("check-interval", self.check_interval))
        self.public_report = self.settings.get("public-report", self.public_report)
        is_execution_empty = not self.engine.config.get("execution")
        # no local executions => assume user wants to launch a test that already exists in BZA
        self.launch_existing_test = self.settings.get("launch-existing-test", is_execution_empty, force_set=True)

        if not self.launch_existing_test:
            self._filter_reporting()

        finder = ProjectFinder(self.parameters, self.settings, self.user, self._workspaces, self.log)
        finder.default_test_name = "Taurus Cloud Test"

        test_type = self.settings.get("test-type")  # user test type. should we mention it in doc?
        if not test_type:
            func_mode = self.engine.is_functional_mode()
            # GUI functional test only when the single executor is Selenium-based
            gui_mode = func_mode and (
                    (len(self.executors) == 1) and
                    isinstance(self.executors[0], SeleniumExecutor))

            if func_mode:
                if gui_mode:
                    test_type = FUNC_GUI_TEST_TYPE
                else:
                    test_type = FUNC_API_TEST_TYPE
            else:
                test_type = TAURUS_TEST_TYPE

        finder.test_type = test_type

        self.router = finder.get_test_router()

        if not self.launch_existing_test:
            self.router.prepare_locations(self.executors, self.engine.config)

            res_files = self.get_rfiles()
            files_for_cloud = self._fix_filenames(res_files)

            # dump the exact config we send, for debugging/reproducibility
            config_for_cloud = self.prepare_cloud_config()
            config_for_cloud.dump(self.engine.create_artifact("cloud", ""))
            del_files = self.settings.get("delete-test-files", True)
            self.router.resolve_test(config_for_cloud, files_for_cloud, del_files)

        self.router.sanitize_test()

        self.report_name = self.settings.get("report-name", self.report_name)
        if self.report_name == 'ask' and sys.stdin.isatty():
            self.report_name = input("Please enter report-name: ")

        self.widget = self.get_widget()

        if self.engine.is_functional_mode():
            self.results_reader = FunctionalBZAReader(self.log)
            self.engine.aggregator.add_underling(self.results_reader)
        else:
            self.results_reader = ResultsFromBZA()
            self.results_reader.log = self.log
            self.engine.aggregator.add_underling(self.results_reader)

        validate_passfail = any(reporter.get('module') == 'passfail' for reporter in reporting)

        if validate_passfail:
            if self.router._test.started_passfail_validation():
                timeout = 100  # seconds to wait for server-side passfail validation
                for i in range(timeout):
                    if self.router._test.get_passfail_validation():
                        return
                    self.log.warning(f"Unsuccessful Passfail validation attempt [{i+1}]. Retrying...")
                    if not i % 10:
                        # remind every ~10 attempts that this is expected to take a while
                        self.log.warning("Please keep in mind that validation can take time.")
                    sleep(1)
                self.log.error("Unable get Passfail validation!")
            else:
                self.log.error("Unable to validate Passfail configuration!")
308 +
309 +
    @staticmethod
    def _get_other_modules(config):
        """Return module names whose 'class' resolves to LocalClient or BlazeMeterUploader."""
        wanted_classes = {LocalClient.__name__, BlazeMeterUploader.__name__}
        modules_cfg = config.get("modules")

        matched = []
        for name in modules_cfg:
            class_name = modules_cfg.get(name).get("class")
            # compare by the trailing component of the dotted class path
            if class_name and class_name.split('.')[-1] in wanted_classes:
                matched.append(name)
        return matched
319 +
320 +
    def _get_executors(self):
        """Names of all executor modules in use, including runners behind Selenium."""
        names = []
        for executor in self.executors:
            names.append(executor.execution.get("executor"))
            if isinstance(executor, SeleniumExecutor):
                # selenium delegates the actual work to a nested runner module
                names.append(executor.runner.execution.get("executor"))

        return names
328 +
329 +
    def _filter_unused_modules(self, config, provisioning):
        """Drop module configs the cloud test won't need; strip 'class' overrides."""
        services = [service.get("module") for service in config.get(Service.SERV)]
        reporters = [reporter.get("module") for reporter in config.get(Reporter.REP)]
        consolidator = config.get(SETTINGS).get("aggregator")

        keep = self._get_executors() + self._get_other_modules(config)
        keep += services + reporters + [consolidator, provisioning]

        # iterate over a snapshot of keys since we delete while walking
        for module in set(config.get("modules").keys()):
            module_cfg = config.get("modules")[module]
            if module_cfg.get("send-to-blazemeter"):
                continue  # explicitly requested to be shipped as-is
            if module not in keep:
                del config.get("modules")[module]
            elif module_cfg.get("class"):
                # custom 'class' points at local code, meaningless in the cloud
                del module_cfg["class"]
345 +
346 +
    def prepare_cloud_config(self):
        """Build the stripped-down config to send to the cloud.

        Removes local-only sections (CLOUD_CONFIG_BLACK_LIST), resolves
        per-provisioning concurrency/throughput values and marks dedicated-IPs
        usage. Works on a deep copy; self.engine.config stays untouched.

        :return: Configuration instance ready for upload
        """
        # expand concurrency and throughput
        for executor in self.executors:
            executor.get_load()

        config = copy.deepcopy(self.engine.config)

        provisioning = config.get(Provisioning.PROV)
        self._filter_unused_modules(config, provisioning)

        # todo: should we remove config['version'] before sending to cloud?
        config['local-bzt-version'] = config.get('version', 'N/A')

        config.filter(CLOUD_CONFIG_BLACK_LIST, black_list=True)

        for execution in config[EXEC]:
            if execution.get("files") == []:
                del execution["files"]

            for param in (ScenarioExecutor.CONCURR, ScenarioExecutor.THRPT):
                # concurrency/throughput may be a per-provisioning mapping;
                # keep only the value for the current provisioning
                param_value = execution.get(param).get(provisioning, None)
                if param_value is None:
                    del execution[param]
                else:
                    execution[param] = param_value

        if self.router.dedicated_ips:
            config[DEDICATED_IPS] = True

        assert isinstance(config, Configuration)
        return config
377 +
378 +
    def __dump_locations_if_needed(self):
        """Print available cloud locations and shut down if 'dump-locations' is set."""
        if not self.settings.get("dump-locations", False):
            return

        locations = {loc['id']: loc for loc in self._workspaces.locations(include_private=True)}

        rows = [("ID", "Name")]
        for location_id in sorted(locations):
            rows.append((location_id, locations[location_id]['title']))

        # pretty unicode table for interactive terminals, plain ASCII otherwise
        is_tty = sys.stdout and sys.stdout.isatty()
        table = SingleTable(rows) if is_tty else AsciiTable(rows)
        self.log.warning("Dumping available locations instead of running the test:\n%s", table.table)
        raise NormalShutdown("Done listing locations")
391 +
392 +
    def _filter_reporting(self):
        """Remove explicit 'blazemeter' reporters: cloud runs report implicitly.

        Rewrites engine.config's reporting list in place.
        """
        reporting = self.engine.config.get(Reporter.REP, [])
        new_reporting = []
        # note: previous code used enumerate() but never used the index
        for reporter in reporting:
            exc = TaurusConfigError("'module' attribute not found in %s" % reporter)
            cls = reporter.get('module', exc)
            if cls == "blazemeter":
                self.log.warning("Explicit blazemeter reporting is skipped for cloud")
            else:
                new_reporting.append(reporter)

        self.engine.config[Reporter.REP] = new_reporting
404 +
405 +
    @staticmethod
    def configure_client(module):
        """Wire the BZA client (module.user) from the module settings.

        :raises TaurusConfigError: when no API token is configured
        """
        user = module.user
        settings = module.settings

        user.log = module.log
        user.logger_limit = settings.get("request-logging-limit", user.logger_limit)
        user.address = settings.get("address", user.address)
        user.token = settings.get("token", user.token)
        user.timeout = dehumanize_time(settings.get("timeout", user.timeout))

        if isinstance(user.http_session, requests.Session):
            # swap the default session for the engine-wide configured client
            module.log.debug("Installing http client")
            user.http_session = module.engine.get_http_client()
            user.http_request = user.http_session.request

        if not user.token:
            raise TaurusConfigError("You must provide API token to use cloud provisioning")
418 +
419 +
    def startup(self):
        """Launch the cloud test and apply report options (public link, name)."""
        super(CloudProvisioning, self).startup()
        self.results_url = self.router.launch_test()
        self.log.info("Started cloud test: %s", self.results_url)
        if self.results_url and self.browser_open in ('start', 'both'):
            open_browser(self.results_url)

        if self.user.token and self.public_report:
            public_link = self.router.master.make_report_public()
            self.log.info("Public report link: %s", public_link)

        if self.report_name:
            self.router.master.set({"name": str(self.report_name)})
433 +
434 +
    def _should_skip_check(self):
        """True when the previous status check was less than check_interval ago."""
        if self._last_check_time is None:
            return False  # never checked yet
        return time.time() < self._last_check_time + self.check_interval
442 +
443 +
    def check(self):
        """Poll the cloud test status; return True when the run is over.

        Polling is rate-limited via _should_skip_check(). In detach mode
        returns True immediately, leaving the test running in the cloud.
        """
        if self.detach:
            self.log.warning('Detaching Taurus from started test...')
            return True

        if self._should_skip_check():
            self.log.debug("Skipping cloud status check")
            return False

        self._last_check_time = time.time()

        master = self._check_master_status()
        status = master.get('status')
        progress = master.get('progress')   # number value of status, see BZA API

        # log only transitions, not every poll
        if status != self.__last_master_status:
            self.__last_master_status = status
            self.log.info("Cloud test status: %s", status)

        if self.results_reader and progress and progress >= BZA_TEST_DATA_RECEIVED:
            # cloud started receiving data — attach the reader to the master
            self.results_reader.master = self.router.master

        if progress == ENDED:
            self.log.info("Test was stopped in the cloud: %s", status)
            self.test_ended = True
            return True

        self.router.start_if_ready()

        self.widget.update()
        return super(CloudProvisioning, self).check()
474 +
475 +
    @get_with_retry
    def _check_master_status(self):
        """Fetch current master status from BZA; network errors retried by decorator."""
        return self.router.get_master_status()
478 +
479 +
    def post_process(self):
        """Stop the cloud test if still running and evaluate its outcome.

        :raises TaurusException: on any session-level error reported by BZA
        :raises AutomatedShutdown: when failure criteria (thresholds) were met
            or the functional summary indicates failure
        """
        self.log.warning('Part of result data might be missed here due to BM API specifics')

        if not self.detach and self.router and not self.test_ended:
            self.router.stop_test()

        if self.results_url:
            if self.browser_open in ('end', 'both'):
                open_browser(self.results_url)

        if self.router and self.router.master:
            full = self.router.master.get_full()
            if 'note' in full and full['note']:
                self.log.warning("Cloud test has probably failed with message: %s", full['note'])

            # any session error aborts post-processing with an exception
            for session in full.get('sessions', ()):
                for error in session.get("errors", ()):
                    raise TaurusException(to_json(error))

            if "hasThresholds" in full and full["hasThresholds"]:
                thresholds = self.router.master.get_thresholds()
                for item in thresholds.get('data', []):
                    if item.get('success', None) is False:
                        reason = None
                        # find the first failed assertion to name the criterion
                        for assertion in item.get('assertions', []):
                            if assertion.get('success', None) is False:
                                criterion = assertion.get('field', '')
                                label = assertion.get('label', '')
                                reason = "Cloud failure criterion %r (on label %r) was met" % (criterion, label)
                                break
                        if reason is None:
                            reason = "Cloud tests failed because failure criteria were met"
                        self.log.warning(reason)
                        raise AutomatedShutdown(reason)

            # if we have captured HARs, let's download them
            for service in self.engine.config.get(Service.SERV, []):
                mod = service.get('module', TaurusConfigError("No 'module' specified for service"))
                assert isinstance(mod, str), mod
                module = self.engine.instantiate_module(mod)
                if isinstance(module, ServiceStubCaptureHAR):
                    self._download_logs()
                    break

            if "functionalSummary" in full:
                summary = full["functionalSummary"]
                if summary is None or summary.get("isFailed", False):
                    raise AutomatedShutdown("Cloud tests failed")
527 +
528 +
    def _download_logs(self):
        """Download session logs/artifacts into <artifacts-dir>/cloud-artifacts.

        Best-effort: a failed download is logged and skipped. Archives named
        'artifacts*.zip' get their web assets (har/jpg/js/html/css) extracted
        next to the archive.
        """
        for session in self.router.master.sessions():
            assert isinstance(session, Session)
            for log in session.get_logs():
                self.log.info("Downloading %s from the cloud", log['filename'])
                cloud_dir = os.path.join(self.engine.artifacts_dir, 'cloud-artifacts')
                if not os.path.exists(cloud_dir):
                    os.makedirs(cloud_dir)
                dest = os.path.join(cloud_dir, log['filename'])
                dwn = ExceptionalDownloader(self.engine.get_http_client())
                with ProgressBarContext() as pbar:
                    try:
                        dwn.get(log['dataUrl'], dest, reporthook=pbar.download_callback)
                    except BaseException:
                        # a single failed download must not abort post-processing
                        self.log.debug("Error is: %s", traceback.format_exc())
                        self.log.warning("Failed to download from %s", log['dataUrl'])
                        continue

                    if log['filename'].startswith('artifacts') and log['filename'].endswith('.zip'):
                        with zipfile.ZipFile(dest) as zipf:
                            for name in zipf.namelist():
                                ext = name.split('.')[-1].lower()
                                if ext in ('har', 'jpg', 'js', 'html', 'css'):
                                    self.log.debug("Extracting %s to %s", name, cloud_dir)
                                    zipf.extract(name, cloud_dir)
553 +
554 +
    def get_widget(self):
        """Lazily create and return the console status widget."""
        if self.widget:
            return self.widget
        self.widget = CloudProvWidget(self.router)
        return self.widget
558 +
559 +
560 +
class ResultsFromBZA(ResultsProvider):
    """
    Pulls KPI/error data from a running BZA master and yields DataPoints.

    :type master: bzt.bza.Master
    """

    def __init__(self, master=None):
        super(ResultsFromBZA, self).__init__()
        self.master = master  # attached by CloudProvisioning once data flows
        self.min_ts = 0  # next timestamp to query KPIs from
        self.log = logging.getLogger('')
        self.prev_errors = BetterDict()  # cumulative error snapshot from previous poll
        self.cur_errors = BetterDict()
        self.handle_errors = True  # fetch error report at most once per poll batch
573 +
574 +
    def _get_err_diff(self):
        """Delta between cur_errors and prev_errors (both hold cumulative counts)."""
        diff = {}
        for label, cur_msgs in self.cur_errors.items():
            if label not in self.prev_errors:
                # brand new label: take its whole error map as the delta
                diff[label] = cur_msgs
                continue

            prev_msgs = self.prev_errors[label]
            for msg, info in cur_msgs.items():
                prev_count = prev_msgs[msg]['count'] if msg in prev_msgs else 0
                delta = info['count'] - prev_count
                if delta > 0:
                    diff.setdefault(label, {})[msg] = {'count': delta, 'rc': info['rc']}

        # drop labels whose delta came out empty
        return {label: content for label, content in diff.items() if content}
595 +
596 +
    def _calculate_datapoints(self, final_pass=False):
        """Generator of DataPoints built from freshly polled master KPI data.

        Yields nothing until a master is attached. Advances self.min_ts so
        each poll fetches only new seconds.

        :param final_pass: when True, include the last (possibly incomplete) second
        """
        if self.master is None:
            return

        data, aggr_raw = self.query_data()
        # index aggregate rows by label name for percentile lookup
        aggr = {}
        for label in aggr_raw:
            aggr[label['labelName']] = label

        for label in data:
            if label.get('kpis') and not final_pass:
                label['kpis'].pop(-1)  # never take last second since it could be incomplete

        # the 'ALL' label defines which timestamps are considered complete
        timestamps = []
        for label in data:
            if label.get('label') == 'ALL':
                timestamps.extend([kpi['ts'] for kpi in label.get('kpis', [])])

        self.handle_errors = True

        for tstmp in timestamps:
            point = DataPoint(tstmp)
            point[DataPoint.SOURCE_ID] = self.master['id']
            self.__generate_kpisets(aggr, data, point, tstmp)

            if self.handle_errors:
                # fetch the error report at most once per batch of timestamps
                self.handle_errors = False
                self.cur_errors = self.__get_errors_from_bza()
                err_diff = self._get_err_diff()
                if err_diff:
                    self.__add_err_diff(point, err_diff)
                    self.prev_errors = self.cur_errors

            point.recalculate()

            self.min_ts = point[DataPoint.TIMESTAMP] + 1
            yield point
633 +
634 +
    def __add_err_diff(self, point, err_diff):
        """Merge fresh error deltas into the KPISets of a DataPoint.

        :param point: DataPoint whose CURRENT KPISets get error info attached
        :param err_diff: mapping produced by _get_err_diff()
        """
        for label in err_diff:
            # 'ALL' maps to the empty-string overall label
            point_label = '' if label == 'ALL' else label
            if point_label not in point[DataPoint.CURRENT]:
                self.log.warning("Got inconsistent kpi/error data for label: %s", point_label)
                kpiset = KPISet()
                point[DataPoint.CURRENT][point_label] = kpiset
                kpiset[KPISet.SAMPLE_COUNT] = sum([item['count'] for item in err_diff[label].values()])
            else:
                kpiset = point[DataPoint.CURRENT][point_label]

            kpiset[KPISet.ERRORS] = self.__get_kpi_errors(err_diff[label])
            kpiset[KPISet.FAILURES] = sum([x['cnt'] for x in kpiset[KPISet.ERRORS]])
            kpiset[KPISet.SAMPLE_COUNT] = kpiset[KPISet.SUCCESSES] + kpiset[KPISet.FAILURES]
            # a zero sample count here would mean bogus data from the API
            assert kpiset[KPISet.SAMPLE_COUNT] > 0, point_label
649 +
650 +
    def __generate_kpisets(self, aggr, data, point, tstmp):
        """Fill point[CURRENT] with a KPISet for every label that has data at tstmp.

        Inconsistent or empty API items are logged and skipped.
        """
        for label in data:
            for kpi in label.get('kpis', []):
                if kpi['ts'] != tstmp:
                    continue
                label_str = label.get('label')
                # label must also be present in the aggregate report (percentiles source)
                if label_str is None or label_str not in aggr:
                    self.log.warning("Skipping inconsistent data from API for label: %s", label_str)
                    continue

                if kpi['n'] <= 0:
                    self.log.warning("Skipping empty KPI item got from API: %s", kpi)
                    continue

                kpiset = self.__get_kpiset(aggr, kpi, label_str)
                # 'ALL' is represented by the empty-string overall label
                point[DataPoint.CURRENT]['' if label_str == 'ALL' else label_str] = kpiset
666 +
667 +
    def __get_errors_from_bza(self):
        """Read the error report from BZA.

        Retries once after a timeout-long pause on network errors.

        :return: mapping of the form
            <request_label>: {<error_message>: {'count': <count>, 'rc': <response code>}}
            where the 'ALL' label is mapped to ''
        """
        result = {}
        try:
            errors = self.master.get_errors()
        except (URLError, TaurusNetworkError):
            self.log.warning("Failed to get errors, will retry in %s seconds...", self.master.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.master.timeout)
            errors = self.master.get_errors()
            self.log.info("Succeeded with retry")

        for e_record in errors:
            _id = e_record["_id"]
            if _id == "ALL":
                _id = ""
            result[_id] = {}
            for error in e_record['errors']:
                result[_id][error['m']] = {'count': error['count'], 'rc': error['rc']}
            for assertion in e_record['assertions']:
                # assertion failures are folded in as errors; 'rc' holds the assertion name
                result[_id][assertion['failureMessage']] = {'count': assertion['failures'], 'rc': assertion['name']}
        return result
697 +
698 +
    def __get_kpi_errors(self, errors):
        """Convert the internal error mapping into a list of KPISet error items."""
        return [
            KPISet.error_item_skel(
                error=msg,
                ret_c=details['rc'],
                cnt=details['count'],
                errtype=KPISet.ERRTYPE_ERROR,  # TODO: what about asserts?
                urls=Counter(), tag=None)
            for msg, details in errors.items()
        ]
709 +
710 +
    def __get_kpiset(self, aggr, kpi, label):
        """Build a KPISet from one per-second KPI item plus aggregate percentiles.

        Times coming from the API are in milliseconds; KPISet stores seconds.
        """
        kpiset = KPISet()
        kpiset[KPISet.FAILURES] = kpi['ec']
        kpiset[KPISet.CONCURRENCY] = kpi['na']
        kpiset[KPISet.SAMPLE_COUNT] = kpi['n']
        # caller filters out n <= 0; errors can never exceed samples
        assert kpi['n'] > 0 and kpi['n'] >= kpi['ec']
        kpiset[KPISet.SUCCESSES] = kpi['n'] - kpi['ec']
        kpiset.sum_rt += kpi['t_avg'] * kpi['n'] / 1000.0
        kpiset.sum_lt += kpi['lt_avg'] * kpi['n'] / 1000.0
        # percentiles are only available from the aggregate report
        perc_map = {'90line': 90.0, "95line": 95.0, "99line": 99.0}
        for field, level in iteritems(perc_map):
            kpiset[KPISet.PERCENTILES][str(level)] = aggr[label][field] / 1000.0
        return kpiset
723 +
724 +
    def query_data(self):
        """Fetch fresh KPI and aggregate data from the master.

        Each request is retried once after a timeout-long pause on network
        errors (previously the retry boilerplate was duplicated inline).

        :return: tuple (kpi data, aggregate report)
        """
        data = self.__request_with_retry(lambda: self.master.get_kpis(self.min_ts), "result KPIs")
        aggr = self.__request_with_retry(self.master.get_aggregate_report, "aggregate results")
        return data, aggr

    def __request_with_retry(self, request, description):
        """Call request(); on URLError/TaurusNetworkError wait master.timeout and retry once."""
        try:
            return request()
        except (URLError, TaurusNetworkError):
            self.log.warning("Failed to get %s, will retry in %s seconds...", description, self.master.timeout)
            self.log.debug("Full exception: %s", traceback.format_exc())
            time.sleep(self.master.timeout)
            result = request()
            self.log.info("Succeeded with retry")
            return result
744 +
745 +
746 +
class FunctionalBZAReader(FunctionalResultsReader):
    """Reads functional test results from a BZA master."""

    def __init__(self, parent_log, master=None):
        super(FunctionalBZAReader, self).__init__()
        self.master = master  # attached later by CloudProvisioning
        self.log = parent_log.getChild(self.__class__.__name__)
751 +
752 +
    @staticmethod
    def extract_samples_from_group(group, group_summary):
        """Yield a FunctionalSample for every sample in a BZA report group."""
        suite_name = group_summary.get("name") or "Tests"
        for sample in group["samples"]:
            # base status from the error flag; a failed assertion escalates to BROKEN
            status = "FAILED" if sample["error"] else "PASSED"
            error_msg = ""
            error_trace = ""
            for assertion in (sample.get("assertions") or []):
                if assertion.get("isFailed"):
                    error_msg = assertion.get("errorMessage")
                    status = "BROKEN"

            duration_ms = sample.get("responseTime") or 0.0
            yield FunctionalSample(
                test_case=sample["label"],
                test_suite=suite_name,
                status=status,
                start_time=int(sample["created"]),
                duration=duration_ms / 1000.0,
                error_msg=error_msg,
                error_trace=error_trace,
                extras={},
                subsamples=[],
            )
780 +
781 +
    def read(self, last_pass=False):
        """Yield FunctionalSamples from the master's functional report groups.

        Produces nothing until last_pass — presumably full functional results
        are only available once the test is over (TODO confirm against BZA API).
        Network errors on each request are retried once after a pause.
        """
        if self.master is None:
            return

        if last_pass:
            try:
                groups = self.master.get_functional_report_groups()
            except (URLError, TaurusNetworkError):
                self.log.warning("Failed to get test groups, will retry in %s seconds...", self.master.timeout)
                self.log.debug("Full exception: %s", traceback.format_exc())
                time.sleep(self.master.timeout)
                groups = self.master.get_functional_report_groups()
                self.log.info("Succeeded with retry")

            for group_summary in groups:
                group_id = group_summary['groupId']
                try:
                    group = self.master.get_functional_report_group(group_id)
                except (URLError, TaurusNetworkError):
                    self.log.warning("Failed to get test group, will retry in %s seconds...", self.master.timeout)
                    self.log.debug("Full exception: %s", traceback.format_exc())
                    time.sleep(self.master.timeout)
                    group = self.master.get_functional_report_group(group_id)
                    self.log.info("Succeeded with retry")

                for sample in self.extract_samples_from_group(group, group_summary):
                    yield sample
808 +
809 +
810 +
class CloudProvWidget(Pile, PrioritizedWidget):
    """Console widget that displays the current cloud test status text."""

    def __init__(self, test):
        """
        :type test: BaseCloudTest
        """
        self.test = test
        self.text = Text("")
        super(CloudProvWidget, self).__init__([self.text])
        PrioritizedWidget.__init__(self)

    def update(self):
        # only refresh the widget when the test reports something
        status_line = self.test.get_test_status_text()
        if status_line:
            self.text.set_text(status_line)
825 +
826 +
class ServiceStubScreenshoter(Service):
    """No-op stand-in for the 'screenshoter' service outside cloud runs."""

    def startup(self):
        # the real service only exists in the cloud; just warn locally
        if isinstance(self.engine.provisioning, CloudProvisioning):
            return
        self.log.warning("Stub for service 'screenshoter', use cloud provisioning to have it working")
831 +
832 +
class ServiceStubCaptureHAR(Service):
    """No-op stand-in for the 'capturehar' service outside cloud runs."""

    def startup(self):
        # the real service only exists in the cloud; just warn locally
        if isinstance(self.engine.provisioning, CloudProvisioning):
            return
        self.log.warning("Stub for service 'capturehar', use cloud provisioning to have it working")

@@ -0,0 +1,4 @@
Loading
1 +
# Config keys and limits shared across the blazemeter modules.
DEDICATED_IPS = "dedicated-ips"
LOC = "locations"
LOC_WEIGHTED = "locations-weighted"
NOTE_SIZE_LIMIT = 2048  # max characters stored in a session/master 'note'

@@ -0,0 +1,82 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
import re
19 +
import time
20 +
import traceback
21 +
from collections import namedtuple
22 +
from functools import wraps
23 +
from ssl import SSLError
24 +
from urllib.error import URLError
25 +
26 +
from requests.exceptions import ReadTimeout
27 +
28 +
from bzt import TaurusNetworkError
29 +
30 +
# Exception types treated as transient network failures worth retrying.
NETWORK_PROBLEMS = (IOError, URLError, SSLError, ReadTimeout, TaurusNetworkError)
31 +
32 +
33 +
def send_with_retry(method):
    """
    Decorator for send-style methods: on a network failure, wait for
    ``self._user.timeout`` seconds and retry once; if the retry also
    fails, log the error and skip the data instead of crashing.
    """
    @wraps(method)
    def _impl(self, *args, **kwargs):
        try:
            method(self, *args, **kwargs)
            return
        except (IOError, TaurusNetworkError):
            self.log.debug("Error sending data: %s", traceback.format_exc())
            self.log.warning("Failed to send data, will retry in %s sec...", self._user.timeout)

        # second (and last) attempt: tolerate any known network problem
        try:
            time.sleep(self._user.timeout)
            method(self, *args, **kwargs)
            self.log.info("Succeeded with retry")
        except NETWORK_PROBLEMS:
            self.log.error("Fatal error sending data: %s", traceback.format_exc())
            self.log.warning("Will skip failed data and continue running")

    return _impl
50 +
51 +
52 +
def get_with_retry(method):
    """
    Decorator for fetch-style methods: keep retrying forever on network
    problems, sleeping ``self.user.timeout`` seconds between attempts.
    """
    @wraps(method)
    def _impl(self, *args, **kwargs):
        while True:
            try:
                outcome = method(self, *args, **kwargs)
            except NETWORK_PROBLEMS:
                self.log.debug("Error making request: %s", traceback.format_exc())
                self.log.warning("Failed to make request, will retry in %s sec...", self.user.timeout)
                time.sleep(self.user.timeout)
                continue
            return outcome

    return _impl
64 +
65 +
66 +
def parse_blazemeter_test_link(link):
    """
    Extract account/workspace/project/test ids from a BlazeMeter test URL.

    Example:
    https://a.blazemeter.com/app/#/accounts/97961/workspaces/89846/projects/229969/tests/5823512

    :param link: candidate URL; anything that is not a str yields None
    :return: TestParams(account_id, workspace_id, project_id, test_id), or None on no match
    """
    if not isinstance(link, str):
        return None

    # Dots are escaped: previously '.' matched any character, so hosts like
    # "aXblazemeter.com" were wrongly accepted.
    regex = r'https://a\.blazemeter\.com/app/#/accounts/(\d+)/workspaces/(\d+)/projects/(\d+)/tests/(\d+)(?:/\w+)?'
    match = re.match(regex, link)
    if match is None:
        return None

    TestParams = namedtuple('TestParams', 'account_id,workspace_id,project_id,test_id')
    return TestParams(*[int(x) for x in match.groups()])

@@ -373,8 +373,7 @@
Loading
373 373
            self.selenium_extras.add(method)
374 374
            elements.append(ast_call(
375 375
                func=ast_attr(method),
376 -
                args=[ast.Str(param, kind="")]
377 -
            ))
376 +
                args=[self._gen_expr(param.strip())]))
378 377
        elif atype == "close":
379 378
            method = "close_window"
380 379
            self.selenium_extras.add(method)
@@ -1465,9 +1464,10 @@
Loading
1465 1464
1466 1465
    @staticmethod
1467 1466
    def _escape_js_blocks(value):  # escapes plain { with {{
1468 -
        for block in re.finditer(r"(?<!\$){.*}", value):
1467 +
        value = value.replace("{", "{{").replace("}", "}}")
1468 +
        for block in re.finditer(r"\${{[\w\d]*}}", value):
1469 1469
            start, end = block.start(), block.end()
1470 -
            line = "{" + value[start:end] + "}"
1470 +
            line = "$" + value[start+2:end-1]
1471 1471
            value = value[:start] + line + value[end:]
1472 1472
        return value
1473 1473

@@ -0,0 +1,688 @@
Loading
1 +
"""
2 +
Module for reporting into http://www.blazemeter.com/ service
3 +
4 +
Copyright 2015 BlazeMeter Inc.
5 +
6 +
Licensed under the Apache License, Version 2.0 (the "License");
7 +
you may not use this file except in compliance with the License.
8 +
You may obtain a copy of the License at
9 +
10 +
   http://www.apache.org/licenses/LICENSE-2.0
11 +
12 +
Unless required by applicable law or agreed to in writing, software
13 +
distributed under the License is distributed on an "AS IS" BASIS,
14 +
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 +
See the License for the specific language governing permissions and
16 +
limitations under the License.
17 +
"""
18 +
import copy
19 +
import logging
20 +
import os
21 +
import platform
22 +
import sys
23 +
import time
24 +
import traceback
25 +
import zipfile
26 +
from collections import defaultdict, OrderedDict
27 +
from io import BytesIO
28 +
from urllib.error import HTTPError
29 +
30 +
import requests
31 +
32 +
from bzt import TaurusInternalException, TaurusConfigError, TaurusNetworkError
33 +
from bzt.bza import User, Session, Test
34 +
from bzt.engine import Reporter, Singletone
35 +
from bzt.utils import b, humanize_bytes, iteritems, open_browser, BetterDict, to_json, dehumanize_time
36 +
from bzt.modules.aggregator import AggregatorListener, DataPoint, KPISet, ResultsProvider
37 +
from bzt.modules.monitoring import Monitoring, MonitoringListener
38 +
from bzt.modules.blazemeter.project_finder import ProjectFinder
39 +
from bzt.modules.blazemeter.net_utils import send_with_retry
40 +
from bzt.modules.blazemeter.const import NOTE_SIZE_LIMIT
41 +
42 +
43 +
class BlazeMeterUploader(Reporter, AggregatorListener, MonitoringListener, Singletone):
    """
    Reporter that streams KPI data, monitoring data and artifacts to the
    BlazeMeter service, either into an existing session (direct feeding)
    or into a session it starts itself.

    :type _test: bzt.bza.Test
    :type _master: bzt.bza.Master
    :type _session: bzt.bza.Session
    """

    def __init__(self):
        super(BlazeMeterUploader, self).__init__()
        self.browser_open = 'start'  # when to open browser: 'start', 'end', 'both' or anything else for never
        self.kpi_buffer = []  # DataPoints accumulated between dispatches
        self.send_interval = 30  # seconds between KPI dispatches
        self._last_status_check = time.time()
        self.send_data = True
        self.upload_artifacts = True
        self.send_monitoring = True
        self.monitoring_buffer = None  # MonitoringBuffer, created in prepare()
        self.public_report = False
        self.last_dispatch = 0
        self.results_url = None
        self._user = User()
        self._test = None
        self._master = None
        self._session = None
        self.first_ts = sys.maxsize  # earliest data timestamp seen so far
        self.last_ts = 0  # latest data timestamp seen so far
        self.report_name = None
        self._dpoint_serializer = DatapointSerializer(self)

    def prepare(self):
        """
        Read options for uploading, check that they're sane
        """
        super(BlazeMeterUploader, self).prepare()
        self.send_interval = dehumanize_time(self.settings.get("send-interval", self.send_interval))
        self.send_monitoring = self.settings.get("send-monitoring", self.send_monitoring)
        monitoring_buffer_limit = self.settings.get("monitoring-buffer-limit", 500)
        self.monitoring_buffer = MonitoringBuffer(monitoring_buffer_limit, self.log)
        self.browser_open = self.settings.get("browser-open", self.browser_open)
        self.public_report = self.settings.get("public-report", self.public_report)
        self.upload_artifacts = self.parameters.get("upload-artifacts", self.upload_artifacts)
        self._dpoint_serializer.multi = self.settings.get("report-times-multiplier", self._dpoint_serializer.multi)
        token = self.settings.get("token", "")
        if not token:
            self.log.warning("No BlazeMeter API key provided, will upload anonymously")
        self._user.token = token

        # usual fields
        self._user.logger_limit = self.settings.get("request-logging-limit", self._user.logger_limit)
        self._user.address = self.settings.get("address", self._user.address).rstrip("/")
        self._user.data_address = self.settings.get("data-address", self._user.data_address).rstrip("/")
        self._user.timeout = dehumanize_time(self.settings.get("timeout", self._user.timeout))
        if isinstance(self._user.http_session, requests.Session):
            # replace the default session with the engine-configured client (proxies etc.)
            self.log.debug("Installing http client")
            self._user.http_session = self.engine.get_http_client()
            self._user.http_request = self._user.http_session.request

        # direct data feeding case: attach to a session created elsewhere
        sess_id = self.parameters.get("session-id")
        if sess_id:
            self._session = Session(self._user, {'id': sess_id})
            self._session['userId'] = self.parameters.get("user-id", None)
            self._session['testId'] = self.parameters.get("test-id", None)
            self._test = Test(self._user, {'id': self._session['testId']})
            exc = TaurusConfigError("Need signature for session")
            self._session.data_signature = self.parameters.get("signature", exc)
            self._session.kpi_target = self.parameters.get("kpi-target", self._session.kpi_target)
            self.send_data = self.parameters.get("send-data", self.send_data)
        else:
            try:
                self._user.ping()  # to check connectivity and auth
            except HTTPError:
                self.log.error("Cannot reach online results storage, maybe the address/token is wrong")
                raise

            if token:
                wsp = self._user.accounts().workspaces()
                if not wsp:
                    raise TaurusNetworkError("Your account has no active workspaces, please contact BlazeMeter support")
                finder = ProjectFinder(self.parameters, self.settings, self._user, wsp, self.log)
                self._test = finder.resolve_external_test()
            else:
                # anonymous upload: test id is assigned by the server later
                self._test = Test(self._user, {'id': None})

        self.report_name = self.parameters.get("report-name", self.settings.get("report-name", self.report_name))
        if self.report_name == 'ask' and sys.stdin.isatty():
            self.report_name = input("Please enter report-name: ")

        if isinstance(self.engine.aggregator, ResultsProvider):
            self.engine.aggregator.add_listener(self)

        for service in self.engine.services:
            if isinstance(service, Monitoring):
                service.add_listener(self)

    def startup(self):
        """
        Initiate online test
        """
        super(BlazeMeterUploader, self).startup()
        self._user.log = self.log.getChild(self.__class__.__name__)

        # skip when attached to an externally-created session (direct feeding)
        if not self._session:
            url = self._start_online()
            self.log.info("Started data feeding: %s", url)
            if self.browser_open in ('start', 'both'):
                open_browser(url)

            if self._user.token and self.public_report:
                report_link = self._master.make_report_public()
                self.log.info("Public report link: %s", report_link)

    def _start_online(self):
        """
        Start online test

        :return: URL of the online results page
        """
        self.log.info("Initiating data feeding...")

        if self._test['id']:
            self._session, self._master = self._test.start_external()
        else:
            # anonymous flow: the server creates the test and returns its id
            self._session, self._master, self.results_url = self._test.start_anonymous_external_test()
            self._test['id'] = self._session['testId']

        if self._test.token:
            self.results_url = self._master.address + '/app/#/masters/%s' % self._master['id']
            if self.report_name:
                self._session.set({"name": str(self.report_name)})

        return self.results_url

    def __get_jtls_and_more(self):
        """
        Compress all files in artifacts dir to single zipfile

        :rtype: (io.BytesIO,dict)
        """
        mfile = BytesIO()
        listing = {}  # full path -> size in bytes, for the upload manifest

        # collect log files so they are uploaded even if outside artifacts dir
        logs = set()
        for handler in self.engine.log.parent.handlers:
            if isinstance(handler, logging.FileHandler):
                logs.add(handler.baseFilename)

        max_file_size = self.settings.get('artifact-upload-size-limit', 10) * 1024 * 1024  # 10MB
        with zipfile.ZipFile(mfile, mode='w', compression=zipfile.ZIP_DEFLATED, allowZip64=True) as zfh:
            for root, _, files in os.walk(self.engine.artifacts_dir):
                for filename in files:
                    full_path = os.path.join(root, filename)
                    if full_path in logs:
                        # log already inside artifacts dir - don't add it twice below
                        logs.remove(full_path)

                    fsize = os.path.getsize(full_path)
                    if fsize <= max_file_size:
                        zfh.write(full_path, os.path.join(os.path.relpath(root, self.engine.artifacts_dir), filename))
                        listing[full_path] = fsize
                    else:
                        msg = "File %s exceeds maximum size quota of %s and won't be included into upload"
                        self.log.warning(msg, filename, max_file_size)

            for filename in logs:  # upload logs unconditionally
                zfh.write(filename, os.path.basename(filename))
                listing[filename] = os.path.getsize(filename)
        return mfile, listing

    def __upload_artifacts(self):
        """
        If token provided, upload artifacts folder contents and bzt.log
        """
        if not self._session.token:
            return

        # distributed runs suffix uploads with the worker index to avoid clashes
        worker_index = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL')
        if worker_index:
            suffix = '-%s' % worker_index
        else:
            suffix = ''
        artifacts_zip = "artifacts%s.zip" % suffix
        mfile, zip_listing = self.__get_jtls_and_more()
        self.log.info("Uploading all artifacts as %s ...", artifacts_zip)
        self._session.upload_file(artifacts_zip, mfile.getvalue())
        self._session.upload_file(artifacts_zip + '.tail.bz', self.__format_listing(zip_listing))

        handlers = self.engine.log.parent.handlers
        for handler in handlers:
            if isinstance(handler, logging.FileHandler):
                fname = handler.baseFilename
                self.log.info("Uploading %s", fname)
                fhead, ftail = os.path.splitext(os.path.split(fname)[-1])
                modified_name = fhead + suffix + ftail
                with open(fname, 'rb') as _file:
                    self._session.upload_file(modified_name, _file.read())
                    # NOTE(review): seek(-4096, 2) assumes the log file is at
                    # least 4096 bytes long - confirm for very short runs
                    _file.seek(-4096, 2)
                    tail = _file.read()
                    tail = tail[tail.index(b("\n")) + 1:]  # drop the first partial line
                    self._session.upload_file(modified_name + ".tail.bz", tail)

    def post_process(self):
        """
        Upload results if possible
        """
        if not self._session:
            self.log.debug("No feeding session obtained, nothing to finalize")
            return

        self.log.debug("KPI bulk buffer len in post-proc: %s", len(self.kpi_buffer))
        try:
            self.log.info("Sending remaining KPI data to server...")
            if self.send_data:
                self.__send_data(self.kpi_buffer, False, True)
                self.kpi_buffer = []

            if self.send_monitoring:
                self.__send_monitoring()
        finally:
            self._postproc_phase2()

        if self.results_url:
            if self.browser_open in ('end', 'both'):
                open_browser(self.results_url)
            self.log.info("Online report link: %s", self.results_url)

    def _postproc_phase2(self):
        """Upload artifacts (best-effort), wait for a ping, then finish the session."""
        try:
            if self.upload_artifacts:
                self.__upload_artifacts()
        except (IOError, TaurusNetworkError):
            # artifact upload is best-effort; never fail the shutdown for it
            self.log.warning("Failed artifact upload: %s", traceback.format_exc())
        finally:
            self._last_status_check = self.parameters.get('forced-last-check', self._last_status_check)
            self.log.debug("Set last check time to: %s", self._last_status_check)

            # NOTE: you dirty one... send_interval is reused as a retry count here
            tries = self.send_interval
            while not self._last_status_check and tries > 0:
                self.log.info("Waiting for ping...")
                time.sleep(self.send_interval)
                tries -= 1

            self._postproc_phase3()

    def _postproc_phase3(self):
        """End the online session and attach the stopping reason as a note."""
        try:
            if self.send_data:
                self.end_online()

            if self._user.token and self.engine.stopping_reason:
                exc_class = self.engine.stopping_reason.__class__.__name__
                note = "%s: %s" % (exc_class, str(self.engine.stopping_reason))
                self.append_note_to_session(note)
                if self._master:
                    self.append_note_to_master(note)

        except KeyboardInterrupt:
            raise
        except BaseException as exc:
            # shutdown must not crash because of reporting problems
            self.log.debug("Failed to finish online: %s", traceback.format_exc())
            self.log.warning("Failed to finish online: %s", exc)

    def end_online(self):
        """
        Finish online test
        """
        if not self._session:
            self.log.debug("Feeding not started, so not stopping")
        else:
            self.log.info("Ending data feeding...")
            if self._user.token:
                self._session.stop()
            else:
                self._session.stop_anonymous()

    def append_note_to_session(self, note):
        """Append `note` to the session's existing note, capped at NOTE_SIZE_LIMIT."""
        self._session.fetch()
        if 'note' in self._session:
            note = self._session['note'] + '\n' + note
        note = note.strip()
        if note:
            self._session.set({'note': note[:NOTE_SIZE_LIMIT]})

    def append_note_to_master(self, note):
        """Append `note` to the master's existing note, capped at NOTE_SIZE_LIMIT."""
        self._master.fetch()
        if 'note' in self._master:
            note = self._master['note'] + '\n' + note
        note = note.strip()
        if note:
            self._master.set({'note': note[:NOTE_SIZE_LIMIT]})

    def check(self):
        """
        Send data if any in buffer
        """
        self.log.debug("KPI bulk buffer len: %s", len(self.kpi_buffer))
        # throttle dispatches to at most once per send_interval
        if self.last_dispatch < (time.time() - self.send_interval):
            self.last_dispatch = time.time()
            if self.send_data and len(self.kpi_buffer):
                self.__send_data(self.kpi_buffer)
                self.kpi_buffer = []

            if self.send_monitoring:
                self.__send_monitoring()
        return super(BlazeMeterUploader, self).check()

    @send_with_retry
    def __send_data(self, data, do_check=True, is_final=False):
        """
        Serialize and upload buffered KPI datapoints.

        :type data: list[bzt.modules.aggregator.DataPoint]
        """
        if not self._session:
            return

        serialized = self._dpoint_serializer.get_kpi_body(data, is_final)
        self._session.send_kpi_data(serialized, do_check)

    def aggregated_second(self, data):
        """
        Send online data
        :param data: DataPoint
        """
        if self.send_data:
            self.kpi_buffer.append(data)

    def monitoring_data(self, data):
        """Buffer a batch of monitoring items for the next dispatch."""
        if self.send_monitoring:
            self.monitoring_buffer.record_data(data)

    @send_with_retry
    def __send_monitoring(self):
        """Upload the buffered monitoring data for this engine instance."""
        engine_id = self.engine.config.get('modules').get('shellexec').get('env').get('TAURUS_INDEX_ALL', '')
        if not engine_id:
            engine_id = "0"
        data = self.monitoring_buffer.get_monitoring_json(self._session)
        self._session.send_monitoring_data(engine_id, data)

    def __format_listing(self, zip_listing):
        """Render the zip manifest as '<size> <relative path>' lines."""
        lines = []
        for fname in sorted(zip_listing.keys()):
            bytestr = humanize_bytes(zip_listing[fname])
            if fname.startswith(self.engine.artifacts_dir):
                fname = fname[len(self.engine.artifacts_dir) + 1:]
            lines.append(bytestr + " " + fname)
        return "\n".join(lines)
387 +
388 +
389 +
class MonitoringBuffer(object):
    """
    Bounded per-source store of monitoring datapoints.

    Layout: ``data[source][timestamp] -> {metric: value, ..., 'interval': n}``.
    Once a source holds more than ``size_limit`` points, neighbouring points
    are merged (metrics averaged, weighted by interval) until it fits.
    """

    def __init__(self, size_limit, parent_log):
        self.size_limit = size_limit
        self.data = defaultdict(OrderedDict)
        self.log = parent_log.getChild(self.__class__.__name__)

    def record_data(self, data):
        """Absorb a batch of monitoring items, then downsample oversized sources."""
        for monitoring_item in data:
            point = copy.deepcopy(monitoring_item)
            source = point.pop('source')
            tstamp = int(point['ts'])
            point['interval'] = 1
            per_source = self.data[source]
            existing = per_source.get(tstamp)
            if existing is None:
                per_source[tstamp] = point
            else:
                existing.update(point)

        for source in list(self.data):
            if len(self.data[source]) > self.size_limit:
                self._downsample(self.data[source])
            self.log.debug("Monitoring buffer size '%s': %s", source, len(self.data[source]))

    def _downsample(self, buff):
        """Merge ever-larger neighbouring intervals until the buffer fits."""
        merge_size = 1
        while len(buff) > self.size_limit:
            self._merge_small_intervals(buff, merge_size)
            merge_size += 1

    def _merge_small_intervals(self, buff, size):
        """Fold each point whose interval is <= size into its right neighbour."""
        stamps = list(buff)
        consumed = set()
        for idx in range(len(stamps) - 1):
            left, right = stamps[idx], stamps[idx + 1]
            if left in consumed or buff[left]['interval'] > size:
                continue
            self._merge_datapoints(buff[left], buff[right])
            del buff[right]
            consumed.add(left)
            consumed.add(right)

    @staticmethod
    def _merge_datapoints(left, right):
        """Merge `right` into `left`, averaging metrics weighted by interval."""
        total = float(left['interval'] + right['interval'])
        for metric, rvalue in right.items():
            if metric in ('ts', 'interval'):
                continue
            if metric in left:
                left[metric] = (left[metric] * left['interval'] + rvalue * right['interval']) / total
            else:
                left[metric] = rvalue
        left['interval'] = total

    def get_monitoring_json(self, session):
        """
        Build the BlazeMeter MONITOR payload from the buffered data.

        :type session: Session
        """
        results = {}
        hosts = []
        kpis = {}

        for source, points in self.data.items():
            for tstamp, point in points.items():
                if source == 'local':
                    source = platform.node()

                if source not in results:
                    results[source] = {
                        "name": source,
                        "intervals": OrderedDict()
                    }

                if source not in hosts:
                    hosts.append(source)

                host_entry = results[source]
                millis = tstamp * 1000
                millis_key = '%d' % millis

                if millis_key not in host_entry['intervals']:
                    host_entry['intervals'][millis_key] = {
                        "start": millis,
                        "duration": point['interval'] * 1000,
                        "indicators": {}
                    }

                for field, value in point.items():
                    lowered = field.lower()
                    if lowered.startswith('conn-all'):
                        field = 'Connections'
                    elif lowered.startswith('cpu'):
                        field = 'CPU'
                    elif lowered.startswith('mem'):
                        field = 'Memory'
                        value *= 100
                    elif field == 'bytes-recv' or lowered.startswith('net'):
                        field = 'Network I/O'
                    elif field == 'engine-loop':
                        field = 'Busy Taurus'
                    else:
                        continue  # maybe one day BZA will accept all other metrics...

                    if field not in kpis:
                        kpis[field] = field

                    host_entry['intervals'][millis_key]['indicators'][field] = {
                        "value": value,
                        "name": field,
                        "std": 0,
                        "mean": 0,
                        "sum": 0,
                        "min": 0,
                        "max": 0,
                        "sumOfSquares": 0,
                        "n": 1
                    }

        # the accepted KPI set is fixed, so collected names are overridden
        kpis = {"Network I/O": "Network I/O", "Memory": "Memory", "CPU": "CPU", "Connections": "Connections"}
        return {
            "reportInfo": {
                "sessionId": session['id'],
                "timestamp": time.time(),
                "userId": session['userId'],
                "testId": session['testId'],
                "type": "MONITOR",
                "testName": ""
            },
            "kpis": kpis,
            "hosts": hosts,
            "results": results
        }
522 +
523 +
524 +
class DatapointSerializer(object):
    """Serializes Taurus DataPoints into the JSON payload format of the BlazeMeter KPI API."""

    def __init__(self, owner):
        """
        :type owner: BlazeMeterUploader
        """
        super(DatapointSerializer, self).__init__()
        self.owner = owner
        self.multi = 1000  # multiplier factor for reporting (converts seconds to milliseconds)

    def get_kpi_body(self, data_buffer, is_final):
        """
        Build the JSON body for a KPI report from a buffer of DataPoints.

        :param data_buffer: list of DataPoint objects, ordered by timestamp
        :param is_final: when True, marks this report as the last one of the session
        :return: JSON string of the report payload
        """
        # - reporting format:
        #   {labels: <data>,    # see below
        #    sourceID: <id of BlazeMeterClient object>,
        #    [is_final: True]}  # for last report
        #
        # - elements of 'data' are described in __get_label()
        #
        # - elements of 'intervals' are described in __get_interval()
        #   every interval contains info about response codes have gotten on it.
        report_items = BetterDict()
        if data_buffer:
            self.owner.first_ts = min(self.owner.first_ts, data_buffer[0][DataPoint.TIMESTAMP])
            self.owner.last_ts = max(self.owner.last_ts, data_buffer[-1][DataPoint.TIMESTAMP])

            # following data is received in the cumulative way
            for label, kpi_set in iteritems(data_buffer[-1][DataPoint.CUMULATIVE]):
                report_item = self.__get_label(label, kpi_set)
                self.__add_errors(report_item, kpi_set)  # 'Errors' tab
                report_items[label] = report_item

            # fill 'Timeline Report' tab with intervals data
            # intervals are received in the additive way
            if report_items:
                for dpoint in data_buffer:
                    time_stamp = dpoint[DataPoint.TIMESTAMP]
                    for label, kpi_set in iteritems(dpoint[DataPoint.CURRENT]):
                        exc = TaurusInternalException('Cumulative KPISet is non-consistent')
                        report_item = report_items.get(label, exc)
                        report_item['intervals'].append(self.__get_interval(kpi_set, time_stamp))

        report_items = [report_items[key] for key in sorted(report_items.keys())]  # convert dict to list
        data = {"labels": report_items, "sourceID": id(self.owner)}
        if is_final:
            data['final'] = True

        return to_json(data)

    @staticmethod
    def __add_errors(report_item, kpi_set):
        """Distribute KPISet errors into the report item's errors/embedded-resources/assertions lists."""
        errors = kpi_set[KPISet.ERRORS]
        for error in errors:
            if error["type"] == KPISet.ERRTYPE_ERROR:
                report_item['errors'].append({
                    'm': error['msg'],
                    "rc": error['rc'],
                    "count": error['cnt'],
                })
            elif error["type"] == KPISet.ERRTYPE_SUBSAMPLE:
                report_item['failedEmbeddedResources'].append({
                    "count": error['cnt'],
                    "rm": error['msg'],
                    "rc": error['rc'],
                    "url": list(error['urls'])[0] if error['urls'] else None,
                })
            else:
                report_item['assertions'].append({
                    'failureMessage': error['msg'],
                    'name': error['tag'] if error['tag'] else 'All Assertions',
                    'failures': error['cnt']
                    # TODO: "count", "errors" = ? (according do Udi's format description)
                })

    def __get_label(self, name, cumul):
        """Build a per-label report skeleton; 'intervals', 'errors' and 'assertions' are filled later."""
        return {
            "n": cumul[KPISet.SAMPLE_COUNT],  # total count of samples
            "name": name if name else 'ALL',  # label
            "interval": 1,  # not used
            "intervals": [],  # list of intervals, fill later
            "samplesNotCounted": 0,  # not used
            "assertionsNotCounted": 0,  # not used
            "failedEmbeddedResources": [],  # not used
            "failedEmbeddedResourcesSpilloverCount": 0,  # not used
            "otherErrorsCount": 0,  # not used
            "errors": [],  # list of errors, fill later
            "assertions": [],  # list of assertions, fill later
            "percentileHistogram": [],  # not used
            "percentileHistogramLatency": [],  # not used
            "percentileHistogramBytes": [],  # not used
            "empty": False,  # not used
            "summary": self.__get_summary(cumul)  # summary info
        }

    def __get_summary(self, cumul):
        """Build the cumulative summary section for one label from a cumulative KPISet."""
        sample_count = cumul[KPISet.SAMPLE_COUNT]
        return {
            "first": self.owner.first_ts,
            "last": self.owner.last_ts,
            "duration": self.owner.last_ts - self.owner.first_ts,
            "failed": cumul[KPISet.FAILURES],
            "hits": sample_count,

            "avg": int(self.multi * cumul[KPISet.AVG_RESP_TIME]),
            "min": int(self.multi * cumul[KPISet.PERCENTILES]["0.0"]) if "0.0" in cumul[KPISet.PERCENTILES] else 0,
            "max": int(self.multi * cumul[KPISet.PERCENTILES]["100.0"]) if "100.0" in cumul[KPISet.PERCENTILES] else 0,
            "std": int(self.multi * cumul[KPISet.STDEV_RESP_TIME]),
            "tp90": int(self.multi * cumul[KPISet.PERCENTILES]["90.0"]) if "90.0" in cumul[KPISet.PERCENTILES] else 0,
            "tp95": int(self.multi * cumul[KPISet.PERCENTILES]["95.0"]) if "95.0" in cumul[KPISet.PERCENTILES] else 0,
            "tp99": int(self.multi * cumul[KPISet.PERCENTILES]["99.0"]) if "99.0" in cumul[KPISet.PERCENTILES] else 0,

            "latencyAvg": int(self.multi * cumul[KPISet.AVG_LATENCY]),
            "latencyMax": 0,
            "latencyMin": 0,
            "latencySTD": 0,

            "bytes": cumul[KPISet.BYTE_COUNT],
            "bytesMax": 0,
            "bytesMin": 0,
            # guard against ZeroDivisionError on an empty KPISet
            "bytesAvg": int(cumul[KPISet.BYTE_COUNT] / float(sample_count)) if sample_count else 0,
            "bytesSTD": 0,

            "otherErrorsSpillcount": 0,
        }

    def __get_interval(self, item, time_stamp):
        """Build one 'Timeline Report' interval entry from a current-second KPISet."""
        #   rc_list - list of info about response codes:
        #   {'n': <number of code encounters>,
        #    'f': <number of failed request (e.q. important for assertions)>
        #    'rc': <string value of response code>}
        rc_list = []
        for r_code, cnt in iteritems(item[KPISet.RESP_CODES]):
            # NOTE(review): 'f' is emitted as a list of per-error counts, while the format note
            # above says "number of failed requests" — confirm against the BZA API before changing
            fails = [err['cnt'] for err in item[KPISet.ERRORS] if str(err['rc']) == r_code]
            rc_list.append({"n": cnt, 'f': fails, "rc": r_code})

        sample_count = item[KPISet.SAMPLE_COUNT]
        return {
            "ec": item[KPISet.FAILURES],
            "ts": time_stamp,
            "na": item[KPISet.CONCURRENCY],
            "n": sample_count,
            "failed": item[KPISet.FAILURES],
            "rc": rc_list,
            "t": {
                "min": int(self.multi * item[KPISet.PERCENTILES]["0.0"]) if "0.0" in item[KPISet.PERCENTILES] else 0,
                "max": int(self.multi * item[KPISet.PERCENTILES]["100.0"]) if "100.0" in item[
                    KPISet.PERCENTILES] else 0,
                "sum": self.multi * item[KPISet.AVG_RESP_TIME] * sample_count,
                "n": sample_count,
                "std": self.multi * item[KPISet.STDEV_RESP_TIME],
                "avg": self.multi * item[KPISet.AVG_RESP_TIME]
            },
            "lt": {
                "min": 0,
                "max": 0,
                "sum": self.multi * item[KPISet.AVG_LATENCY] * sample_count,
                "n": sample_count,
                "std": 0,
                "avg": self.multi * item[KPISet.AVG_LATENCY]
            },
            "by": {
                "min": 0,
                "max": 0,
                "sum": item[KPISet.BYTE_COUNT],
                "n": sample_count,
                "std": 0,
                # guard against ZeroDivisionError on an empty KPISet
                "avg": item[KPISet.BYTE_COUNT] / float(sample_count) if sample_count else 0
            },
        }
Files Coverage
bzt 90.32%
Project Totals (70 files) 90.32%
9230.2
TRAVIS_PYTHON_VERSION=3.8
TRAVIS_OS_NAME=linux
1
codecov:
2
  notify:
3
    require_ci_to_pass: yes
4

5
coverage:
6
  round: up
7

8
ignore:
9
  - bzt/resources
Sunburst
The inner-most circle is the entire project; moving away from the center are folders and, finally, a single file. The size and color of each slice represent the number of statements and the coverage, respectively.
Icicle
The top section represents the entire project, proceeding with folders and finally individual files. The size and color of each slice represent the number of statements and the coverage, respectively.
Grid
Each block represents a single file in the project. The size and color of each block represent the number of statements and the coverage, respectively.
Loading