diff --git a/.dockerignore b/.dockerignore index 437cb0ee..752df873 100644 --- a/.dockerignore +++ b/.dockerignore @@ -19,3 +19,5 @@ bfx-report-ui/build bfx-report-ui/bfx-report-express/logs/*.log bfx-report-ui/bfx-report-express/config/*.json stub.AppImage +e2e-test-report.xml +test-report.json diff --git a/.github/actions/prepare-mac-runner/action.yml b/.github/actions/prepare-mac-runner/action.yml new file mode 100644 index 00000000..f5a52f0f --- /dev/null +++ b/.github/actions/prepare-mac-runner/action.yml @@ -0,0 +1,7 @@ +name: 'Prepare Mac runner' +description: 'Turn uninterrupted testing on mac' +runs: + using: composite + steps: + - run: ${{ github.action_path }}/prepare-mac-runner.sh + shell: bash diff --git a/.github/actions/prepare-mac-runner/prepare-mac-runner.sh b/.github/actions/prepare-mac-runner/prepare-mac-runner.sh new file mode 100755 index 00000000..37a25e0c --- /dev/null +++ b/.github/actions/prepare-mac-runner/prepare-mac-runner.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash + +# Turn uninterrupted testing on mac + +# Change Local name to avoid name clash causing alert +uniqueComputerName="mac-e2e-test-runner-$RANDOM" +sudo scutil --set LocalHostName "$uniqueComputerName" +sudo scutil --set ComputerName "$uniqueComputerName" + +# Close Notification window +sudo killall UserNotificationCenter || true + +# Do not disturb +defaults -currentHost write com.apple.notificationcenterui doNotDisturb -boolean true +defaults -currentHost write com.apple.notificationcenterui doNotDisturbDate -date "`date -u +\"%Y-%m-%d %H:%M:%S +0000\"`" +sudo killall NotificationCenter + +# Disable firewall +sudo /usr/libexec/ApplicationFirewall/socketfilterfw --setglobalstate off +sudo /usr/libexec/ApplicationFirewall/socketfilterfw -k + +# Close Finder Windows using Apple Script +sudo osascript -e 'tell application "Finder" to close windows' diff --git a/.github/workflows/build-electron-app.yml b/.github/workflows/build-electron-app.yml index f020e287..0f07009b 100644 --- 
a/.github/workflows/build-electron-app.yml +++ b/.github/workflows/build-electron-app.yml @@ -30,7 +30,7 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Set repo owner @@ -76,6 +76,20 @@ jobs: max_attempts: 3 retry_on: any command: ./scripts/launch.sh -lwp + - name: Zip Linux Unpacked build + run: zip -r dist/linux-unpacked.zip dist/linux-unpacked + - name: Upload Linux Unpacked build + uses: actions/upload-artifact@v3 + with: + name: linux-unpacked + path: dist/linux-unpacked.zip + - name: Zip Win Unpacked build + run: zip -r dist/win-unpacked.zip dist/win-unpacked + - name: Upload Win Unpacked build + uses: actions/upload-artifact@v3 + with: + name: win-unpacked + path: dist/win-unpacked.zip - name: Prepare cache folders run: | sudo chown -R $(id -u):$(id -g) ~/.cache/electron @@ -83,10 +97,10 @@ jobs: mac-builder: timeout-minutes: 90 - runs-on: macos-11 + runs-on: macos-12 steps: - name: Checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: submodules: recursive - name: Set repo owner @@ -135,3 +149,108 @@ jobs: max_attempts: 3 retry_on: any command: ./scripts/build-release.sh -mp + - name: Zip Mac Unpacked build + run: zip -r dist/mac.zip dist/mac + - name: Upload Mac Unpacked build + uses: actions/upload-artifact@v3 + with: + name: mac-unpacked + path: dist/mac.zip + + linux-e2e-test-runner: + name: Linux E2E Test Runner + timeout-minutes: 30 + runs-on: ubuntu-22.04 + needs: [linux-win-docker-builder] + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/setup-node@v3 + with: + node-version: 18.17.1 + - name: Install main dev deps + run: npm i --development --no-audit --progress=false --force + - name: Download Linux Unpacked build + uses: actions/download-artifact@v3 + with: + name: linux-unpacked + path: dist + - name: Unzip Linux Unpacked build + run: unzip dist/linux-unpacked.zip + - name: Run tests + uses: 
coactions/setup-xvfb@v1.0.1 + with: + run: npm run e2e + - name: Normalize E2E test report + run: node ./scripts/node/normalize-e2e-test-report e2e-test-report.xml + - name: Upload Linux E2E test results + uses: actions/upload-artifact@v3 + with: + name: linux-e2e-test-results + path: e2e-test-report.xml + + win-e2e-test-runner: + name: Win E2E Test Runner + timeout-minutes: 30 + runs-on: windows-2022 + needs: [linux-win-docker-builder] + steps: + - name: Checkout + uses: actions/checkout@v4 + - uses: actions/setup-node@v3 + with: + node-version: 18.17.1 + - name: Install main dev deps + run: npm i --development --no-audit --progress=false --force + - name: Download Win Unpacked build + uses: actions/download-artifact@v3 + with: + name: win-unpacked + path: dist + - name: Unzip Win Unpacked build + run: 7z x -y dist/win-unpacked.zip + - name: Run tests + uses: coactions/setup-xvfb@v1.0.1 + with: + run: npm run e2e + - name: Normalize E2E test report + run: node ./scripts/node/normalize-e2e-test-report e2e-test-report.xml + - name: Upload Win E2E test results + uses: actions/upload-artifact@v3 + with: + name: win-e2e-test-results + path: e2e-test-report.xml + + mac-e2e-test-runner: + name: Mac E2E Test Runner + timeout-minutes: 30 + runs-on: macos-12 + needs: [mac-builder] + steps: + - name: Checkout + uses: actions/checkout@v4 + - name: Prepare Mac runner + uses: ./.github/actions/prepare-mac-runner + - uses: actions/setup-node@v3 + with: + node-version: 18.17.1 + - name: Install main dev deps + run: npm i --development --no-audit --progress=false --force + - name: Download Mac Unpacked build + uses: actions/download-artifact@v3 + with: + name: mac-unpacked + path: dist + - name: Unzip Mac Unpacked build + run: unzip dist/mac.zip + - name: Run tests + uses: coactions/setup-xvfb@v1.0.1 + with: + run: npm run e2e + - name: Normalize E2E test report + run: node ./scripts/node/normalize-e2e-test-report e2e-test-report.xml + - name: Upload Mac E2E test results + uses: 
actions/upload-artifact@v3 + with: + name: mac-e2e-test-results + path: e2e-test-report.xml diff --git a/.github/workflows/e2e-test-report.yml b/.github/workflows/e2e-test-report.yml new file mode 100644 index 00000000..f8d8a397 --- /dev/null +++ b/.github/workflows/e2e-test-report.yml @@ -0,0 +1,47 @@ +name: 'E2E Test Report' +run-name: 'E2E Test Report: Commit ${{ github.sha }}' + +on: + workflow_run: + workflows: ['Build release'] + types: + - completed + +permissions: + contents: read + actions: read + checks: write + +jobs: + e2e-web-page-report: + name: E2E Web Page Report + runs-on: ubuntu-22.04 + steps: + - uses: dorny/test-reporter@v1 + id: linux-e2e-test-results + with: + artifact: linux-e2e-test-results + name: Linux E2E Tests + path: e2e-test-report.xml + reporter: jest-junit + - uses: dorny/test-reporter@v1 + id: win-e2e-test-results + with: + artifact: win-e2e-test-results + name: Win E2E Tests + path: e2e-test-report.xml + reporter: jest-junit + - uses: dorny/test-reporter@v1 + id: mac-e2e-test-results + with: + artifact: mac-e2e-test-results + name: Mac E2E Tests + path: e2e-test-report.xml + reporter: jest-junit + - name: E2E Test Report Summary + run: | + echo "### E2E Test Report page is ready! 
:rocket:" >> $GITHUB_STEP_SUMMARY + echo "And available at the following links for applicable OSs:" >> $GITHUB_STEP_SUMMARY + echo "- [Linux](${{ steps.linux-e2e-test-results.outputs.url_html }})" >> $GITHUB_STEP_SUMMARY + echo "- [Win](${{ steps.win-e2e-test-results.outputs.url_html }})" >> $GITHUB_STEP_SUMMARY + echo "- [Mac](${{ steps.mac-e2e-test-results.outputs.url_html }})" >> $GITHUB_STEP_SUMMARY diff --git a/.gitignore b/.gitignore index 63b4b0c8..204379e2 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,5 @@ package-lock.json lastCommit.json electronEnv.json stub.AppImage +e2e-test-report.xml +test-report.json diff --git a/electron-builder-config.js b/electron-builder-config.js index 8d4f2cee..3b970ca7 100644 --- a/electron-builder-config.js +++ b/electron-builder-config.js @@ -107,6 +107,10 @@ module.exports = { 'build/icon.*', 'build/loader.*', '!scripts${/*}', + '!test/${/*}', + '!electronEnv.json.example', + '!e2e-test-report.xml', + '!wdio.conf.js', '!bfx-report-ui', 'bfx-report-ui/build', @@ -133,6 +137,8 @@ module.exports = { '!**/LICENSE.md', '!**/.gitmodules', '!**/.npmrc', + '!**/.mocharc.json', + '!**/.github/${/*}', { from: 'bfx-reports-framework/node_modules', to: 'bfx-reports-framework/node_modules', @@ -143,6 +149,16 @@ module.exports = { to: 'bfx-report-ui/bfx-report-express/node_modules', filter: nodeModulesFilter }, + { + from: 'node_modules/wdio-electron-service', + to: 'node_modules/wdio-electron-service', + filter: nodeModulesFilter + }, + { + from: 'node_modules/wdio-electron-service/node_modules', + to: 'node_modules/wdio-electron-service/node_modules', + filter: nodeModulesFilter + }, ...getNodeModulesSubSources('bfx-reports-framework'), ...getNodeModulesSubSources('bfx-report-ui/bfx-report-express') ], diff --git a/index.js b/index.js index 98810008..4de1e892 100644 --- a/index.js +++ b/index.js @@ -14,6 +14,8 @@ try { const { app } = require('electron') +const isTestEnv = process.env.NODE_ENV === 'test' + const productName = 
require('./src/helpers/product-name') app.setName(productName) @@ -33,6 +35,10 @@ if (shouldQuit) { } else { ;(async () => { try { + if (isTestEnv) { + require('wdio-electron-service/main') + } + await initializeApp() } catch (err) { console.error(err) diff --git a/package.json b/package.json index e3a0968f..f4d9bf9f 100644 --- a/package.json +++ b/package.json @@ -35,11 +35,18 @@ }, "devDependencies": { "@mapbox/node-pre-gyp": "1.0.6", + "@wdio/cli": "8.22.1", + "@wdio/junit-reporter": "8.21.0", + "@wdio/local-runner": "8.22.1", + "@wdio/mocha-framework": "8.22.0", + "@wdio/spec-reporter": "8.21.0", "app-builder-bin": "4.1.0", - "electron": "27.0.2", - "electron-builder": "23.6.0", + "cross-env": "7.0.3", + "electron": "27.0.4", + "electron-builder": "24.8.1", "mocha": "10.2.0", - "standard": "16.0.4" + "standard": "16.0.4", + "wdio-electron-service": "5.4.0" }, "standard": { "globals": [ @@ -50,11 +57,12 @@ ] }, "scripts": { - "start": "export NODE_ENV=development&&export DEBUG=*&&electron .", + "start": "cross-env NODE_ENV=development DEBUG=* electron .", "test": "standard && npm run unit", - "unit": "export NODE_ENV=test && mocha './src/**/__test__/*.spec.js' --config .mocharc.json", + "unit": "cross-env NODE_ENV=test mocha './src/**/__test__/*.spec.js' --config .mocharc.json", "setup": "./scripts/setup.sh", "launch": "./scripts/launch.sh", - "sync-repo": "./scripts/sync-repo.sh" + "sync-repo": "./scripts/sync-repo.sh", + "e2e": "cross-env NODE_ENV=test wdio run ./wdio.conf.js" } } diff --git a/scripts/build-release.sh b/scripts/build-release.sh index a2718592..601317d5 100755 --- a/scripts/build-release.sh +++ b/scripts/build-release.sh @@ -236,7 +236,9 @@ node "$ROOT/node_modules/.bin/electron-builder" \ unpackedFolder=$(ls -d "$DIST_FOLDER/"*/ | grep $targetPlatform | head -1) -rm -rf "$unpackedFolder" +# Don't remove the unpacked folder of the app for e2e test runner +# but keep it for further debugging purposes +# rm -rf "$unpackedFolder" rm -rf 
"$DIST_FOLDER/.icon-ico" rm -f "$DIST_FOLDER/builder-effective-config.yaml" rm -f "$DIST_FOLDER/builder-debug.yml" diff --git a/scripts/node/normalize-e2e-test-report.js b/scripts/node/normalize-e2e-test-report.js new file mode 100644 index 00000000..4de742b1 --- /dev/null +++ b/scripts/node/normalize-e2e-test-report.js @@ -0,0 +1,20 @@ +'use strict' + +const path = require('path') +const { + readFileSync, + writeFileSync +} = require('fs') + +const cwd = process.cwd() +const fileName = process.argv[2] +const filePath = path.join(cwd, fileName) + +const content = readFileSync(filePath, { encoding: 'utf8' }) +/* + * For compatibility with the dorny/test-reporter, + * there needs to be 'time' attribute to '<testsuites>' tag + */ +const normalizedContent = content + .replace(/<testsuites>/gi, '<testsuites time="0">') +writeFileSync(filePath, normalizedContent) diff --git a/src/enforce-macos-app-location.js b/src/enforce-macos-app-location.js index d2e1c5a7..ae16b33f 100644 --- a/src/enforce-macos-app-location.js +++ b/src/enforce-macos-app-location.js @@ -10,6 +10,7 @@ const { module.exports = async () => { if ( + process.env.NODE_ENV === 'test' || process.env.NODE_ENV === 'development' || process.platform !== 'darwin' ) { diff --git a/src/preload.js b/src/preload.js new file mode 100644 index 00000000..804b55ab --- /dev/null +++ b/src/preload.js @@ -0,0 +1,16 @@ +'use strict' + +const { ipcRenderer, contextBridge } = require('electron') + +// See the Electron documentation for details on how to use preload scripts: +// https://www.electronjs.org/docs/latest/tutorial/process-model#preload-scripts +const isTest = process.env.NODE_ENV === 'test' + +if (isTest) { + require('wdio-electron-service/preload') +} + +contextBridge.exposeInMainWorld('electron', { + openDialog: (method, config) => ipcRenderer + .send('dialog', method, config) +}) diff --git a/src/window-creators.js b/src/window-creators.js index 461eb16f..82779b60 100644 --- a/src/window-creators.js +++ b/src/window-creators.js @@ -93,7 +93,12 @@ const 
_createWindow = async ( icon: path.join(__dirname, '../build/icons/512x512.png'), backgroundColor: '#172d3e', show: false, - ...props + ...props, + + webPreferences: { + preload: path.join(__dirname, 'preload.js'), + ...props?.webPreferences + } } wins[winName] = new BrowserWindow(_props) diff --git a/test/e2e/application.spec.js b/test/e2e/application.spec.js new file mode 100644 index 00000000..bb4f2077 --- /dev/null +++ b/test/e2e/application.spec.js @@ -0,0 +1,7 @@ +const { browser, expect } = require('@wdio/globals') + +describe('Electron Testing', () => { + it('should print application title', async () => { + expect(await browser.getTitle()).toBe('Bitfinex Reporting & Performance Tools') + }) +}) diff --git a/wdio.conf.js b/wdio.conf.js new file mode 100644 index 00000000..77d8a794 --- /dev/null +++ b/wdio.conf.js @@ -0,0 +1,312 @@ +process.env.TEST = 'true' + +const getAppBinaryPath = () => { + if (process.platform === 'win32') { + return './dist/win-unpacked/Bitfinex Report.exe' + } + if (process.platform === 'darwin') { + return './dist/mac/Bitfinex Report.app/Contents/MacOS/Bitfinex Report' + } + if (process.platform === 'linux') { + return './dist/linux-unpacked/app' + } +} + +exports.config = { + // + // ==================== + // Runner Configuration + // ==================== + // WebdriverIO supports running e2e tests as well as unit and component tests. + runner: 'local', + // + // ================== + // Specify Test Files + // ================== + // Define which test specs should run. The pattern is relative to the directory + // of the configuration file being run. + // + // The specs are defined as an array of spec files (optionally using wildcards + // that will be expanded). The test for each spec file will be run in a separate + // worker process. In order to have a group of spec files run in the same worker + // process simply enclose them in an array within the specs array. 
+ // + // If you are calling `wdio` from an NPM script (see https://docs.npmjs.com/cli/run-script), + // then the current working directory is where your `package.json` resides, so `wdio` + // will be called from there. + // + specs: [ + './test/e2e/*.spec.js' + ], + // Patterns to exclude. + exclude: [ + // 'path/to/excluded/files' + ], + // + // ============ + // Capabilities + // ============ + // Define your capabilities here. WebdriverIO can run multiple capabilities at the same + // time. Depending on the number of capabilities, WebdriverIO launches several test + // sessions. Within your capabilities you can overwrite the spec and exclude options in + // order to group specific specs to a specific capability. + // + // First, you can define how many instances should be started at the same time. Let's + // say you have 3 different capabilities (Chrome, Firefox, and Safari) and you have + // set maxInstances to 1; wdio will spawn 3 processes. Therefore, if you have 10 spec + // files and you set maxInstances to 10, all spec files will get tested at the same time + // and 30 processes will get spawned. The property handles how many capabilities + // from the same test should run tests. 
+ // + maxInstances: 10, + // + // If you have trouble getting all important capabilities together, check out the + // Sauce Labs platform configurator - a great tool to configure your capabilities: + // https://saucelabs.com/platform/platform-configurator + // + capabilities: [{ + browserName: 'electron', + // Electron service options + // see https://webdriver.io/docs/wdio-electron-service/#configuration + 'wdio:electronServiceOptions': { + appBinaryPath: getAppBinaryPath(), + + // custom application args + appArgs: [] + } + }], + + // + // =================== + // Test Configurations + // =================== + // Define all options that are relevant for the WebdriverIO instance here + // + // Level of logging verbosity: trace | debug | info | warn | error | silent + logLevel: 'debug', + // + // Set specific log levels per logger + // loggers: + // - webdriver, webdriverio + // - @wdio/browserstack-service, @wdio/devtools-service, @wdio/sauce-service + // - @wdio/mocha-framework, @wdio/jasmine-framework + // - @wdio/local-runner + // - @wdio/sumologic-reporter + // - @wdio/cli, @wdio/config, @wdio/utils + // Level of logging verbosity: trace | debug | info | warn | error | silent + // logLevels: { + // webdriver: 'info', + // '@wdio/appium-service': 'info' + // }, + // + // If you only want to run your tests until a specific amount of tests have failed use + // bail (default is 0 - don't bail, run all tests). + bail: 0, + // + // Set a base URL in order to shorten url command calls. If your `url` parameter starts + // with `/`, the base url gets prepended, not including the path portion of your baseUrl. + // If your `url` parameter starts without a scheme or `/` (like `some/path`), the base url + // gets prepended directly. + baseUrl: '', + // + // Default timeout for all waitFor* commands. 
+ waitforTimeout: 10000, + // + // Default timeout in milliseconds for request + // if browser driver or grid doesn't send response + connectionRetryTimeout: 120000, + // + // Default request retries count + connectionRetryCount: 3, + // + // Test runner services + // Services take over a specific job you don't want to take care of. They enhance + // your test setup with almost no effort. Unlike plugins, they don't add new + // commands. Instead, they hook themselves up into the test process. + services: ['electron'], + + // Framework you want to run your specs with. + // The following are supported: Mocha, Jasmine, and Cucumber + // see also: https://webdriver.io/docs/frameworks + // + // Make sure you have the wdio adapter package for the specific framework installed + // before running any tests. + framework: 'mocha', + + // + // The number of times to retry the entire specfile when it fails as a whole + // specFileRetries: 1, + // + // Delay in seconds between the spec file retry attempts + // specFileRetriesDelay: 0, + // + // Whether or not retried spec files should be retried immediately or deferred to the end of the queue + // specFileRetriesDeferred: false, + // + // Test reporter for stdout. + // The only one supported by default is 'dot' + // see also: https://webdriver.io/docs/dot-reporter + reporters: [ + ['spec', { + showPreface: false + }], + ['junit', { + outputDir: './', + outputFileFormat: () => 'e2e-test-report.xml' + }] + ], + + // Options to be passed to Mocha. + // See the full list at http://mochajs.org/ + mochaOpts: { + ui: 'bdd', + timeout: 60000 + } + + // + // ===== + // Hooks + // ===== + // WebdriverIO provides several hooks you can use to interfere with the test process in order to enhance + // it and to build services around it. You can either apply a single function or an array of + // methods to it. If one of them returns with a promise, WebdriverIO will wait until that promise got + // resolved to continue. 
+ /** + * Gets executed once before all workers get launched. + * @param {object} config wdio configuration object + * @param {Array.} capabilities list of capabilities details + */ + // onPrepare: function (config, capabilities) { + // }, + /** + * Gets executed before a worker process is spawned and can be used to initialise specific service + * for that worker as well as modify runtime environments in an async fashion. + * @param {string} cid capability id (e.g 0-0) + * @param {object} caps object containing capabilities for session that will be spawn in the worker + * @param {object} specs specs to be run in the worker process + * @param {object} args object that will be merged with the main configuration once worker is initialized + * @param {object} execArgv list of string arguments passed to the worker process + */ + // onWorkerStart: function (cid, caps, specs, args, execArgv) { + // }, + /** + * Gets executed just after a worker process has exited. + * @param {string} cid capability id (e.g 0-0) + * @param {number} exitCode 0 - success, 1 - fail + * @param {object} specs specs to be run in the worker process + * @param {number} retries number of retries used + */ + // onWorkerEnd: function (cid, exitCode, specs, retries) { + // }, + /** + * Gets executed just before initialising the webdriver session and test framework. It allows you + * to manipulate configurations depending on the capability or spec. + * @param {object} config wdio configuration object + * @param {Array.} capabilities list of capabilities details + * @param {Array.} specs List of spec file paths that are to be run + * @param {string} cid worker id (e.g. 0-0) + */ + // beforeSession: function (config, capabilities, specs, cid) { + // }, + /** + * Gets executed before test execution begins. At this point you can access to all global + * variables like `browser`. It is the perfect place to define custom commands. 
+ * @param {Array.} capabilities list of capabilities details + * @param {Array.} specs List of spec file paths that are to be run + * @param {object} browser instance of created browser/device session + */ + // before: function (capabilities, specs) { + // }, + /** + * Runs before a WebdriverIO command gets executed. + * @param {string} commandName hook command name + * @param {Array} args arguments that command would receive + */ + // beforeCommand: function (commandName, args) { + // }, + /** + * Hook that gets executed before the suite starts + * @param {object} suite suite details + */ + // beforeSuite: function (suite) { + // }, + /** + * Function to be executed before a test (in Mocha/Jasmine) starts. + */ + // beforeTest: function (test, context) { + // }, + /** + * Hook that gets executed _before_ a hook within the suite starts (e.g. runs before calling + * beforeEach in Mocha) + */ + // beforeHook: function (test, context, hookName) { + // }, + /** + * Hook that gets executed _after_ a hook within the suite starts (e.g. runs after calling + * afterEach in Mocha) + */ + // afterHook: function (test, context, { error, result, duration, passed, retries }, hookName) { + // }, + /** + * Function to be executed after a test (in Mocha/Jasmine only) + * @param {object} test test object + * @param {object} context scope object the test was executed with + * @param {Error} result.error error object in case the test fails, otherwise `undefined` + * @param {*} result.result return object of test function + * @param {number} result.duration duration of test + * @param {boolean} result.passed true if test has passed, otherwise false + * @param {object} result.retries information about spec related retries, e.g. 
`{ attempts: 0, limit: 0 }` + */ + // afterTest: function(test, context, { error, result, duration, passed, retries }) { + // }, + + /** + * Hook that gets executed after the suite has ended + * @param {object} suite suite details + */ + // afterSuite: function (suite) { + // }, + /** + * Runs after a WebdriverIO command gets executed + * @param {string} commandName hook command name + * @param {Array} args arguments that command would receive + * @param {number} result 0 - command success, 1 - command error + * @param {object} error error object if any + */ + // afterCommand: function (commandName, args, result, error) { + // }, + /** + * Gets executed after all tests are done. You still have access to all global variables from + * the test. + * @param {number} result 0 - test pass, 1 - test fail + * @param {Array.} capabilities list of capabilities details + * @param {Array.} specs List of spec file paths that ran + */ + // after: function (result, capabilities, specs) { + // }, + /** + * Gets executed right after terminating the webdriver session. + * @param {object} config wdio configuration object + * @param {Array.} capabilities list of capabilities details + * @param {Array.} specs List of spec file paths that ran + */ + // afterSession: function (config, capabilities, specs) { + // }, + /** + * Gets executed after all workers got shut down and the process is about to exit. An error + * thrown in the onComplete hook will result in the test run failing. + * @param {object} exitCode 0 - success, 1 - fail + * @param {object} config wdio configuration object + * @param {Array.} capabilities list of capabilities details + * @param {} results object containing test results + */ + // onComplete: function(exitCode, config, capabilities, results) { + // }, + /** + * Gets executed when a refresh happens. 
+ * @param {string} oldSessionId session ID of the old session + * @param {string} newSessionId session ID of the new session + */ + // onReload: function(oldSessionId, newSessionId) { + // } +}