diff --git a/.github/ISSUE_TEMPLATE/release-checklist.md b/.github/ISSUE_TEMPLATE/release-checklist.md index 06c15d3154..3e6259cefc 100644 --- a/.github/ISSUE_TEMPLATE/release-checklist.md +++ b/.github/ISSUE_TEMPLATE/release-checklist.md @@ -41,15 +41,15 @@ Release captain responsible - <@gh_username> - [ ] Prepare for the release. - [ ] Update the [`nebari upgrade`](https://github.com/nebari-dev/nebari/blob/develop/src/_nebari/upgrade.py) for this release - [ ] Add upgrade messaging including deprecation warnings, version specific warnings and so on. - - [ ] Announce build freeze. + - [ ] Optionally, announce a merge freeze. - [ ] Release Candidate (RC) cycle. - Is this a hotfix? - [ ] Create a new branch off of the last version tag. - Use this branch to cut the pre-release and the "official" release. - [ ] `git cherry-pick` the commits that should be included. + - [ ] [Cut RC via GHA release workflow (w/ "This is a pre-release" checked).](https://github.com/nebari-dev/nebari/releases/new) - [ ] Perform end-to-end testing. [Use the Testing Checklist template.](https://github.com/nebari-dev/nebari/issues/new?assignees=&labels=type%3A+release+%F0%9F%8F%B7&template=testing-checklist.md&title=Testing+checklist+for+) - For minor releases, relying on the end-to-end integration tests might suffice. - - [ ] [Cut RC via GHA release workflow (w/ "This is a pre-release" checked).](https://github.com/nebari-dev/nebari/releases/new) - [ ] End-user validation. - If possible, pull in volunteers to help test. - (Repeat steps if necessary) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index bac0c8b888..ea466e8c7e 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -30,6 +30,14 @@ _Put a `x` in the boxes that apply_ - [ ] Did you test the pull request locally? - [ ] Did you add new tests? +## How to test this PR? + + + ## Any other comments? 
--- -### Release 2024.5.1 - May 13, 2024 +## Release 2024.9.1 - September 27, 2024 -## What's Changed +> WARNING: This release changes how group directories are mounted in JupyterLab pods: only groups with specific permissions will have their directories mounted. If you rely on custom group mounts, we strongly recommend running `nebari upgrade` before updating. This will prompt you to confirm how Nebari should handle your groups—either keep them mounted or allow unmounting. **No data will be lost**, and you can reverse this anytime. + +### What's Changed +* Fix: KeyValueDict error when deploying to existing infrastructure by @oftheaxe in https://github.com/nebari-dev/nebari/pull/2560 +* Remove unused AWS terraform modules by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2623 +* Upgrade Hashicorp Vault action by @aktech in https://github.com/nebari-dev/nebari/pull/2616 +* Pass `oauth_no_confirm=True` to jhub-apps by @krassowski in https://github.com/nebari-dev/nebari/pull/2631 +* Use Rook Ceph for Jupyterhub and Conda Store drives by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2541 +* Fix typo in guided init by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2635 +* Action var tests off by @BrianCashProf in https://github.com/nebari-dev/nebari/pull/2632 +* add a "moved" block to account for refactored terraform code without deleting/recreating NFS disks by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2639 +* Use Helm Chart for JupyterHub 5.1.0 by @krassowski in https://github.com/nebari-dev/nebari/pull/2661 +* Add a how to test section to PR template by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2659 +* Support disallowed nebari config changes by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2660 +* Fix converted init command in guided init by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2666 +* Add initial uptime metrics by @dcmcand in 
https://github.com/nebari-dev/nebari/pull/2609 +* Refactor and extend Playwright tests by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2644 +* Remove Cypress remaining tests/files by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2672 +* refactor jupyterhub user token retrieval within pytest by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2645 +* add moved block to account for terraform changes on AWS only by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2673 +* Refactor shared group mounting using RBAC by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2593 +* Dashboard fix usage report by @kenafoster in https://github.com/nebari-dev/nebari/pull/2671 +* only capture stdout not stdout+stderr when capture_output=True by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2704 +* revert breaking change to azure deployment test by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2706 +* Refactor GitOps approach prompt flow in guided init by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2269 +* template the kustomization.yaml file by @dcmcand in https://github.com/nebari-dev/nebari/pull/2667 +* Fix auto-provisioned GitHub repo description after guided init by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2708 +* Add amazon_web_services configuration option to specify EKS cluster api server endpoint access setting by @joneszc in https://github.com/nebari-dev/nebari/pull/2618 +* Use Google Auth and Cloud Python APIs instead of `gcloud` CLI by @swastik959 in https://github.com/nebari-dev/nebari/pull/2083 +* fix broken links in README.md, SECURITY.md, and CONTRIBUTING.md by @blakerosenthal in https://github.com/nebari-dev/nebari/pull/2720 +* add test for changing dicts and lists by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2724 +* 2024.9.1 upgrade notes by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2726 +* Add Support for AWS Launch Template 
Configuration by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2668 +* Run terraform init before running terraform show by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2734 +* Release Process Checklist Updates by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2727 +* Test implicit aiohttp's TCP to HTTP connector change by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2741 +* remove comments by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2743 +* Deploy Rook Ceph Helm only when Ceph FS Needed by @kenafoster in https://github.com/nebari-dev/nebari/pull/2742 +* fix group mounting paths by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2738 +* Add compatibility prompt and notes for shared group mounting by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2739 + +### New Contributors +* @oftheaxe made their first contribution in https://github.com/nebari-dev/nebari/pull/2560 +* @joneszc made their first contribution in https://github.com/nebari-dev/nebari/pull/2618 +* @swastik959 made their first contribution in https://github.com/nebari-dev/nebari/pull/2083 +* @blakerosenthal made their first contribution in https://github.com/nebari-dev/nebari/pull/2720 + +**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.7.1...2024.9.1 + + +## Release 2024.7.1 - August 8, 2024 + +> NOTE: Support for Digital Ocean deployments using CLI commands and related Terraform modules is being deprecated. Although Digital Ocean will no longer be directly supported in future releases, you can still deploy to Digital Ocean infrastructure using the current `existing` deployment option. 
+ +### What's Changed +* Enable authentication by default in jupyter-server by @krassowski in https://github.com/nebari-dev/nebari/pull/2288 +* remove dns sleep by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2550 +* Conda-store permissions v2 + load roles from keycloak by @aktech in https://github.com/nebari-dev/nebari/pull/2531 +* Restrict public access and add bucket encryption using cmk by @dcmcand in https://github.com/nebari-dev/nebari/pull/2525 +* Add overwrite to AWS coredns addon by @dcmcand in https://github.com/nebari-dev/nebari/pull/2538 +* Add a default roles at initialisation by @aktech in https://github.com/nebari-dev/nebari/pull/2546 +* Hide gallery section if no exhibits are configured by @krassowski in https://github.com/nebari-dev/nebari/pull/2549 +* Add note about ~/.bash_profile by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2575 +* Expose jupyterlab-gallery branch and depth options by @krassowski in https://github.com/nebari-dev/nebari/pull/2556 +* #2566 Upgrade Jupyterhub ssh image by @arjxn-py in https://github.com/nebari-dev/nebari/pull/2576 +* Stop copying unnecessary files into user home directory by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2578 +* Include deprecation notes for init/deploy subcommands by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2582 +* Only download jar if file doesn't exist by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2588 +* Remove unnecessary experimental flag by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2606 +* Add typos spell checker to pre-commit by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2568 +* Enh 2451 skip conditionals by @BrianCashProf in https://github.com/nebari-dev/nebari/pull/2569 +* Improve codespell support: adjust and concentrate config to pyproject.toml and fix more typos by @yarikoptic in https://github.com/nebari-dev/nebari/pull/2583 +* Move codespell config to pyproject.toml only by 
@Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2611 +* Add `depends_on` for bucket encryption by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2615 + +### New Contributors +* @BrianCashProf made their first contribution in https://github.com/nebari-dev/nebari/pull/2569 +* @yarikoptic made their first contribution in https://github.com/nebari-dev/nebari/pull/2583 + + +**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.6.1...2024.7.1 + + +## Release 2024.6.1 - June 26, 2024 + +> NOTE: This release includes an upgrade to the `kube-prometheus-stack` Helm chart, resulting in a newer version of Grafana. When upgrading your Nebari cluster, you will be prompted to have Nebari update some CRDs and delete a DaemonSet on your behalf. If you prefer, you can also run the commands yourself, which will be shown to you. If you have any custom dashboards, you'll also need to back them up by [exporting them as JSON](https://grafana.com/docs/grafana/latest/dashboards/share-dashboards-panels/#export-a-dashboard-as-json), so you can [import them](https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/#import-a-dashboard) after upgrading. 
+ +### What's Changed +* Fetch JupyterHub roles from Keycloak by @krassowski in https://github.com/nebari-dev/nebari/pull/2447 +* Update selector for Start server button to use button tag by @krassowski in https://github.com/nebari-dev/nebari/pull/2464 +* Reduce GCP Fixed Costs by 50% by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2453 +* Restore JupyterHub updates from PR-2427 by @viniciusdc in https://github.com/nebari-dev/nebari/pull/2465 +* Workload identity by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2460 +* Fix test using a non-specific selector by @krassowski in https://github.com/nebari-dev/nebari/pull/2475 +* add verify=false since we use self signed cert in tests by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2481 +* fix forward auth when using custom cert by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2479 +* Upgrade to JupyterHub 5.0.0b2 by @krassowski in https://github.com/nebari-dev/nebari/pull/2468 +* upgrade instructions for PR 2453 by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2466 +* Use Helm Chart for JupyterHub 5.0.0 final by @krassowski in https://github.com/nebari-dev/nebari/pull/2484 +* Parse and insert keycloak roles scopes into JupyterHub by @aktech in https://github.com/nebari-dev/nebari/pull/2471 +* Add CITATION file by @pavithraes in https://github.com/nebari-dev/nebari/pull/2455 +* CI: add azure integration by @fangchenli in https://github.com/nebari-dev/nebari/pull/2061 +* Create trivy.yml by @dcmcand in https://github.com/nebari-dev/nebari/pull/2458 +* don't run azure deployment on PRs, only on schedule and manual trigger by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2498 +* add cloud provider deployment status badges to README.md by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2407 +* Upgrade kube-prometheus-stack helm chart by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2472 +* upgrade note by @Adam-D-Lewis in 
https://github.com/nebari-dev/nebari/pull/2502 +* Remove VSCode from jhub_apps default services by @jbouder in https://github.com/nebari-dev/nebari/pull/2503 +* Explicit config by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2294 +* fix general node scaling bug for azure by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/2517 +* Skip running cleanup on pull requests by @aktech in https://github.com/nebari-dev/nebari/pull/2488 +* 1792 Add docstrings to `upgrade.py` by @arjxn-py in https://github.com/nebari-dev/nebari/pull/2512 +* set's min TLS version for azure storage account to TLS 1.2 by @dcmcand in https://github.com/nebari-dev/nebari/pull/2522 +* Fix conda-store and Traefik Grafana Dashboards by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2540 +* Implement support for jupyterlab-gallery config by @krassowski in https://github.com/nebari-dev/nebari/pull/2501 +* Add option to run CRDs updates and DaemonSet deletion on user's behalf. by @marcelovilla in https://github.com/nebari-dev/nebari/pull/2544 + +### New Contributors +* @arjxn-py made their first contribution in https://github.com/nebari-dev/nebari/pull/2512 + +**Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.5.1...2024.6.1 + +## Release 2024.5.1 - May 13, 2024 + +### What's Changed * make userscheduler run on general node group by @Adam-D-Lewis in * Upgrade to Pydantic V2 by @Adam-D-Lewis in @@ -66,6 +190,7 @@ This file is copied to nebari-dev/nebari-docs using a GitHub Action. --> **Full Changelog**: https://github.com/nebari-dev/nebari/compare/2024.3.2...2024.3.3 + ## Release 2024.3.2 - March 14, 2024 ### What's Changed @@ -246,7 +371,7 @@ command and follow the instructions * paginator for boto3 ec2 instance types by @sblair-metrostar in https://github.com/nebari-dev/nebari/pull/1923 * Update README.md -- fix typo. 
by @teoliphant in https://github.com/nebari-dev/nebari/pull/1925 * Add more unit tests, add cleanup step for Digital Ocean integration test by @iameskild in https://github.com/nebari-dev/nebari/pull/1910 -* Add cleanup step for AWS integration test, ensure diable_prompt is passed through by @iameskild in https://github.com/nebari-dev/nebari/pull/1921 +* Add cleanup step for AWS integration test, ensure disable_prompt is passed through by @iameskild in https://github.com/nebari-dev/nebari/pull/1921 * K8s 1.25 + More Improvements by @Adam-D-Lewis in https://github.com/nebari-dev/nebari/pull/1856 * adding lifecycle ignore to eks node group by @sblair-metrostar in https://github.com/nebari-dev/nebari/pull/1905 * nebari init unit tests by @sblair-metrostar in https://github.com/nebari-dev/nebari/pull/1931 diff --git a/SECURITY.md b/SECURITY.md index 76f80ef924..1dc8551632 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -8,4 +8,4 @@ You should feel comfortable upgrading if you're using our documented public APIs ## Reporting a Vulnerability -If you think you found a vulnerability, please report it at [nebari/security](https://github.com/nebari-dev/nebari/security/new). Please do not report security vulnerabilities on our public issue tracker. Exposing vulnerabilities publicly without giving maintainers a chance to release a fix puts users at risk. +If you think you found a vulnerability, please report it at [nebari/security](https://github.com/nebari-dev/nebari/security/advisories/new). Please do not report security vulnerabilities on our public issue tracker. Exposing vulnerabilities publicly without giving maintainers a chance to release a fix puts users at risk. 
diff --git a/pyproject.toml b/pyproject.toml index 91b0fe4eda..75301b8549 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,6 +60,13 @@ dependencies = [ "bcrypt==4.0.1", "boto3==1.34.63", "cloudflare==2.11.7", + "google-auth==2.31.0", + "google-cloud-compute==1.19.1", + "google-cloud-container==2.49.0", + "google-cloud-iam==2.15.1", + "google-cloud-storage==2.18.0", + "grpc-google-iam-v1==0.13.1", + "jinja2", "kubernetes==27.2.0", "pluggy==1.3.0", "prompt-toolkit==3.0.36", @@ -72,6 +79,7 @@ dependencies = [ "ruamel.yaml==0.18.6", "typer==0.9.0", "packaging==23.2", + "typing-extensions==4.11.0", ] [project.optional-dependencies] @@ -82,7 +90,6 @@ dev = [ "diagrams", "escapism", "importlib-metadata<5.0", - "jinja2", "mypy==1.6.1", "paramiko", "pre-commit", @@ -176,3 +183,16 @@ exclude_also = [ "@(abc\\.)?abstractmethod", ] ignore_errors = false + +[tool.typos] +files.extend-exclude = ["_build", "*/build/*", "*/node_modules/*", "nebari.egg-info", "*.git", "*.js", "*.json", "*.yaml", "*.yml", "pre-commit-config.yaml"] +default.extend-ignore-re = ["(?Rm)^.*(#|//)\\s*typos: ignore$"] +default.extend-ignore-words-re = ["aks", "AKS"] +default.check-filename = true + +[tool.codespell] +# Ref: https://github.com/codespell-project/codespell#using-a-config-file +skip = '_build,*/build/*,*/node_modules/*,nebari.egg-info,*.git,package-lock.json,*.lock' +check-hidden = true +ignore-regex = '^\s*"image/\S+": ".*' +ignore-words-list = 'aks' diff --git a/pytest.ini b/pytest.ini index 0555ec6b2d..d299f154a8 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,6 +1,6 @@ [pytest] addopts = - # show tests that (f)ailed, (E)rror, or (X)passed in the summary + # show tests that (f)ailed, (E)rror, or (X)passed in the summary # typos: ignore -rfEX # Make tracebacks shorter --tb=native diff --git a/src/_nebari/config.py b/src/_nebari/config.py index 7c27274f36..9d7dec4bd4 100644 --- a/src/_nebari/config.py +++ b/src/_nebari/config.py @@ -103,7 +103,8 @@ def write_configuration( """Write the 
nebari configuration file to disk""" with config_filename.open(mode) as f: if isinstance(config, pydantic.BaseModel): - yaml.dump(config.model_dump(), f) + config_dict = config.model_dump() + yaml.dump(config_dict, f) else: config = dump_nested_model(config) yaml.dump(config, f) diff --git a/src/_nebari/constants.py b/src/_nebari/constants.py index d0e3f37444..6e57519fee 100644 --- a/src/_nebari/constants.py +++ b/src/_nebari/constants.py @@ -1,10 +1,14 @@ -CURRENT_RELEASE = "2024.5.1" +CURRENT_RELEASE = "2024.9.1" +HELM_VERSION = "v3.15.3" +KUSTOMIZE_VERSION = "5.4.3" # NOTE: Terraform cannot be upgraded further due to Hashicorp licensing changes # implemented in August 2023. # https://www.hashicorp.com/license-faq TERRAFORM_VERSION = "1.5.7" +KUBERHEALTHY_HELM_VERSION = "100" + # 04-kubernetes-ingress DEFAULT_TRAEFIK_IMAGE_TAG = "2.9.1" diff --git a/src/_nebari/deploy.py b/src/_nebari/deploy.py index 46cc20179b..4478e65f75 100644 --- a/src/_nebari/deploy.py +++ b/src/_nebari/deploy.py @@ -49,7 +49,9 @@ def deploy_configuration( stage_outputs = {} with contextlib.ExitStack() as stack: for stage in stages: - s = stage(output_directory=pathlib.Path.cwd(), config=config) + s: hookspecs.NebariStage = stage( + output_directory=pathlib.Path.cwd(), config=config + ) stack.enter_context(s.deploy(stage_outputs, disable_prompt)) if not disable_checks: diff --git a/src/_nebari/deprecate.py b/src/_nebari/deprecate.py index 0b9494e9ad..d43c057b9c 100644 --- a/src/_nebari/deprecate.py +++ b/src/_nebari/deprecate.py @@ -7,4 +7,6 @@ ".github/workflows/image.yaml", ".github/workflows/jupyterhub-pr.yaml", ".github/workflows/jupyterhub.yaml", + # v2024.7.3 renamed misspelled file path + "stages/07-kubernetes-services/modules/kubernetes/services/dask-gateway/controler.tf", # codespell:ignore ] diff --git a/src/_nebari/destroy.py b/src/_nebari/destroy.py index 900ad8acf8..cf17bd733f 100644 --- a/src/_nebari/destroy.py +++ b/src/_nebari/destroy.py @@ -22,7 +22,9 @@ def 
destroy_configuration(config: schema.Main, stages: List[hookspecs.NebariStag with contextlib.ExitStack() as stack: for stage in stages: try: - s = stage(output_directory=pathlib.Path.cwd(), config=config) + s: hookspecs.NebariStage = stage( + output_directory=pathlib.Path.cwd(), config=config + ) stack.enter_context(s.destroy(stage_outputs, status)) except Exception as e: status[s.name] = False diff --git a/src/_nebari/initialize.py b/src/_nebari/initialize.py index df693ca8f0..7745df2a98 100644 --- a/src/_nebari/initialize.py +++ b/src/_nebari/initialize.py @@ -233,7 +233,7 @@ def github_auto_provision(config: pydantic.BaseModel, owner: str, repo: str): github.create_repository( owner, repo, - description=f"Nebari {config.project_name}-{config.provider}", + description=f"Nebari {config.project_name}-{config.provider.value}", homepage=f"https://{config.domain}", ) except requests.exceptions.HTTPError as he: diff --git a/src/_nebari/keycloak.py b/src/_nebari/keycloak.py index ea8815940d..6bfea9b8b3 100644 --- a/src/_nebari/keycloak.py +++ b/src/_nebari/keycloak.py @@ -81,27 +81,16 @@ def list_users(keycloak_admin: keycloak.KeycloakAdmin): ) -def get_keycloak_admin_from_config(config: schema.Main): - keycloak_server_url = os.environ.get( - "KEYCLOAK_SERVER_URL", f"https://{config.domain}/auth/" - ) - - keycloak_username = os.environ.get("KEYCLOAK_ADMIN_USERNAME", "root") - keycloak_password = os.environ.get( - "KEYCLOAK_ADMIN_PASSWORD", config.security.keycloak.initial_root_password - ) - - should_verify_tls = config.certificate.type != CertificateEnum.selfsigned - +def get_keycloak_admin(server_url, username, password, verify=False): try: keycloak_admin = keycloak.KeycloakAdmin( - server_url=keycloak_server_url, - username=keycloak_username, - password=keycloak_password, + server_url=server_url, + username=username, + password=password, realm_name=os.environ.get("KEYCLOAK_REALM", "nebari"), user_realm_name="master", auto_refresh_token=("get", "put", "post", 
"delete"), - verify=should_verify_tls, + verify=verify, ) except ( keycloak.exceptions.KeycloakConnectionError, @@ -112,6 +101,26 @@ def get_keycloak_admin_from_config(config: schema.Main): return keycloak_admin +def get_keycloak_admin_from_config(config: schema.Main): + keycloak_server_url = os.environ.get( + "KEYCLOAK_SERVER_URL", f"https://{config.domain}/auth/" + ) + + keycloak_username = os.environ.get("KEYCLOAK_ADMIN_USERNAME", "root") + keycloak_password = os.environ.get( + "KEYCLOAK_ADMIN_PASSWORD", config.security.keycloak.initial_root_password + ) + + should_verify_tls = config.certificate.type != CertificateEnum.selfsigned + + return get_keycloak_admin( + server_url=keycloak_server_url, + username=keycloak_username, + password=keycloak_password, + verify=should_verify_tls, + ) + + def keycloak_rest_api_call(config: schema.Main = None, request: str = None): """Communicate directly with the Keycloak REST API by passing it a request""" keycloak_server_url = os.environ.get( diff --git a/src/_nebari/provider/cicd/github.py b/src/_nebari/provider/cicd/github.py index 2563af6ad9..0c9003ecdd 100644 --- a/src/_nebari/provider/cicd/github.py +++ b/src/_nebari/provider/cicd/github.py @@ -201,16 +201,6 @@ def setup_python_step(): ) -def setup_gcloud(): - return GHA_job_step( - name="Setup gcloud", - uses="google-github-actions/auth@v1", - with_={ - "credentials_json": "${{ secrets.GOOGLE_CREDENTIALS }}", - }, - ) - - def install_nebari_step(nebari_version): return GHA_job_step(name="Install Nebari", run=pip_install_nebari(nebari_version)) @@ -226,9 +216,6 @@ def gen_nebari_ops(config): step3 = install_nebari_step(config.nebari_version) gha_steps = [step1, step2, step3] - if config.provider == schema.ProviderEnum.gcp: - gha_steps.append(setup_gcloud()) - for step in config.ci_cd.before_script: gha_steps.append(GHA_job_step(**step)) diff --git a/src/_nebari/provider/cloud/google_cloud.py b/src/_nebari/provider/cloud/google_cloud.py index 67d0ebad7a..6b54e40e9d 100644 
--- a/src/_nebari/provider/cloud/google_cloud.py +++ b/src/_nebari/provider/cloud/google_cloud.py @@ -1,7 +1,11 @@ import functools import json -import subprocess -from typing import Dict, List, Set +import os +from typing import List, Set + +import google.api_core.exceptions +from google.auth import load_credentials_from_dict, load_credentials_from_file +from google.cloud import compute_v1, container_v1, iam_admin_v1, storage from _nebari.constants import GCP_ENV_DOCS from _nebari.provider.cloud.commons import filter_by_highest_supported_k8s_version @@ -15,232 +19,144 @@ def check_credentials() -> None: @functools.lru_cache() -def projects() -> Dict[str, str]: - """Return a dict of available projects.""" +def load_credentials(): check_credentials() - output = subprocess.check_output( - ["gcloud", "projects", "list", "--format=json(name,projectId)"] - ) - data = json.loads(output) - return {_["name"]: _["projectId"] for _ in data} + credentials = os.environ["GOOGLE_CREDENTIALS"] + project_id = os.environ["PROJECT_ID"] + + # Scopes need to be explicitly defined when using workload identity + # federation. 
+    scopes = ["https://www.googleapis.com/auth/cloud-platform"] + + # Google credentials are stored as strings in GHA secrets so we need + # to determine if the credentials are stored as a file or not before + # reading them + if credentials.endswith(".json"): + loaded_credentials, _ = load_credentials_from_file(credentials, scopes=scopes) + else: + loaded_credentials, _ = load_credentials_from_dict( + json.loads(credentials), scopes=scopes + ) + + return loaded_credentials, project_id @functools.lru_cache() def regions() -> Set[str]: - """Return a set of available regions.""" - check_credentials() - output = subprocess.check_output( - ["gcloud", "compute", "regions", "list", "--format=json(name)"] - ) - data = json.loads(output) - return {_["name"] for _ in data} + """Return a set of available regions.""" + credentials, project_id = load_credentials() + client = compute_v1.RegionsClient(credentials=credentials) + response = client.list(project=project_id) - -@functools.lru_cache() -def zones(project: str, region: str) -> Dict[str, str]: - """Return a dict of available zones.""" - check_credentials() - output = subprocess.check_output( - ["gcloud", "compute", "zones", "list", "--project", project, "--format=json"] - ) - data = json.loads(output.decode("utf-8")) - return {_["description"]: _["name"] for _ in data if _["name"].startswith(region)} + return {region.name for region in response} @functools.lru_cache() def kubernetes_versions(region: str) -> List[str]: """Return list of available kubernetes supported by cloud provider. 
Sorted from oldest to latest.""" - check_credentials() - output = subprocess.check_output( - [ - "gcloud", - "container", - "get-server-config", - "--region", - region, - "--format=json", - ] + credentials, project_id = load_credentials() + client = container_v1.ClusterManagerClient(credentials=credentials) + response = client.get_server_config( + name=f"projects/{project_id}/locations/{region}" ) - data = json.loads(output.decode("utf-8")) - supported_kubernetes_versions = sorted([_ for _ in data["validMasterVersions"]]) - return filter_by_highest_supported_k8s_version(supported_kubernetes_versions) - - -@functools.lru_cache() -def instances(project: str) -> Dict[str, str]: - """Return a dict of available instances.""" - check_credentials() - output = subprocess.check_output( - [ - "gcloud", - "compute", - "machine-types", - "list", - "--project", - project, - "--format=json", - ] - ) - data = json.loads(output.decode("utf-8")) - return {_["description"]: _["name"] for _ in data} - + supported_kubernetes_versions = response.valid_master_versions -def activated_services() -> Set[str]: - """Return a list of activated services.""" - check_credentials() - output = subprocess.check_output( - [ - "gcloud", - "services", - "list", - "--enabled", - "--format=json(config.title)", - ] - ) - data = json.loads(output) - return {service["config"]["title"] for service in data} + return filter_by_highest_supported_k8s_version(supported_kubernetes_versions) -def cluster_exists(cluster_name: str, project_id: str, region: str) -> bool: +def cluster_exists(cluster_name: str, region: str) -> bool: """Check if a GKE cluster exists.""" + credentials, project_id = load_credentials() + client = container_v1.ClusterManagerClient(credentials=credentials) + try: - subprocess.check_output( - [ - "gcloud", - "container", - "clusters", - "describe", - cluster_name, - "--project", - project_id, - "--region", - region, - ] + client.get_cluster( + 
name=f"projects/{project_id}/locations/{region}/clusters/{cluster_name}" ) - return True - except subprocess.CalledProcessError: + except google.api_core.exceptions.NotFound: return False + return True -def bucket_exists(bucket_name: str, project_id: str) -> bool: +def bucket_exists(bucket_name: str) -> bool: """Check if a storage bucket exists.""" + credentials, _ = load_credentials() + client = storage.Client(credentials=credentials) + try: - print(f"Checking if bucket {bucket_name} exists in project {project_id}.") - subprocess.check_output( - [ - "gsutil", - "ls", - f"gs://{bucket_name}/", - "-p", - project_id, - ] - ) - return True - except subprocess.CalledProcessError: + client.get_bucket(bucket_name) + except google.api_core.exceptions.NotFound: return False + return True -def service_account_exists(service_account_name: str, project_id: str) -> bool: +def service_account_exists(service_account_name: str) -> bool: """Check if a service account exists.""" + credentials, project_id = load_credentials() + client = iam_admin_v1.IAMClient(credentials=credentials) + + service_account_path = client.service_account_path(project_id, service_account_name) try: - subprocess.check_output( - [ - "gcloud", - "iam", - "service-accounts", - "describe", - service_account_name, - "--project", - project_id, - ] - ) - return True - except subprocess.CalledProcessError: + client.get_service_account(name=service_account_path) + except google.api_core.exceptions.NotFound: return False + return True -def delete_cluster(cluster_name: str, project_id: str, region: str): +def delete_cluster(cluster_name: str, region: str): """Delete a GKE cluster if it exists.""" - check_credentials() - - if not cluster_exists(cluster_name, project_id, region): + credentials, project_id = load_credentials() + if not cluster_exists(cluster_name, region): print( f"Cluster {cluster_name} does not exist in project {project_id}, region {region}. Exiting gracefully." 
) return + client = container_v1.ClusterManagerClient(credentials=credentials) try: - subprocess.check_call( - [ - "gcloud", - "container", - "clusters", - "delete", - cluster_name, - "--project", - project_id, - "--region", - region, - "--quiet", - ] + client.delete_cluster( + name=f"projects/{project_id}/locations/{region}/clusters/{cluster_name}" ) print(f"Successfully deleted cluster {cluster_name}.") - except subprocess.CalledProcessError as e: - print(f"Failed to delete cluster {cluster_name}. Error: {e}") + except google.api_core.exceptions.GoogleAPIError as e: + print(f"Failed to delete cluster {cluster_name}. Error: {e}") -def delete_storage_bucket(bucket_name: str, project_id: str): +def delete_storage_bucket(bucket_name: str): """Delete a storage bucket if it exists.""" - check_credentials() + credentials, project_id = load_credentials() - if not bucket_exists(bucket_name, project_id): + if not bucket_exists(bucket_name): print( f"Bucket {bucket_name} does not exist in project {project_id}. Exiting gracefully." ) return + client = storage.Client(credentials=credentials) + bucket = client.get_bucket(bucket_name) try: - subprocess.check_call( - [ - "gsutil", - "-m", - "rm", - "-r", - f"gs://{bucket_name}", - "-p", - project_id, - ] - ) + bucket.delete(force=True) print(f"Successfully deleted bucket {bucket_name}.") - except subprocess.CalledProcessError as e: + except google.api_core.exceptions.GoogleAPIError as e: print(f"Failed to delete bucket {bucket_name}. Error: {e}") -def delete_service_account(service_account_name: str, project_id: str): +def delete_service_account(service_account_name: str): """Delete a service account if it exists.""" - check_credentials() + credentials, project_id = load_credentials() - if not service_account_exists(service_account_name, project_id): + if not service_account_exists(service_account_name): print( f"Service account {service_account_name} does not exist in project {project_id}. Exiting gracefully." 
) return + client = iam_admin_v1.IAMClient(credentials=credentials) + service_account_path = client.service_account_path(project_id, service_account_name) try: - subprocess.check_call( - [ - "gcloud", - "iam", - "service-accounts", - "delete", - service_account_name, - "--quiet", - "--project", - project_id, - ] - ) + client.delete_service_account(name=service_account_path) print(f"Successfully deleted service account {service_account_name}.") - except subprocess.CalledProcessError as e: + except google.api_core.exceptions.GoogleAPIError as e: print(f"Failed to delete service account {service_account_name}. Error: {e}") @@ -257,43 +173,6 @@ def gcp_cleanup(config: schema.Main): f"{project_name}-{namespace}@{project_id}.iam.gserviceaccount.com" ) - delete_cluster(cluster_name, project_id, region) - delete_storage_bucket(bucket_name, project_id) - delete_service_account(service_account_name, project_id) - - -def check_missing_service() -> None: - """Check if all required services are activated.""" - required = { - "Compute Engine API", - "Kubernetes Engine API", - "Cloud Monitoring API", - "Cloud Autoscaling API", - "Identity and Access Management (IAM) API", - "Cloud Resource Manager API", - } - activated = activated_services() - common = required.intersection(activated) - missing = required.difference(common) - if missing: - raise ValueError( - f"""Missing required services: {missing}\n - Please see the documentation for more information: {GCP_ENV_DOCS}""" - ) - - -# Getting pricing data could come from here -# https://cloudpricingcalculator.appspot.com/static/data/pricelist.json - - -### PYDANTIC VALIDATORS ### - - -def validate_region(region: str) -> str: - """Validate the GCP region is valid.""" - available_regions = regions() - if region not in available_regions: - raise ValueError( - f"Region {region} is not one of available regions {available_regions}" - ) - return region + delete_cluster(cluster_name, region) + delete_storage_bucket(bucket_name) + 
delete_service_account(service_account_name) diff --git a/src/_nebari/provider/helm.py b/src/_nebari/provider/helm.py new file mode 100644 index 0000000000..a1d0a7b8c6 --- /dev/null +++ b/src/_nebari/provider/helm.py @@ -0,0 +1,71 @@ +import logging +import os +import subprocess +import tempfile +from pathlib import Path + +from _nebari import constants +from _nebari.utils import run_subprocess_cmd + +logger = logging.getLogger(__name__) + + +class HelmException(Exception): + pass + + +def download_helm_binary(version=constants.HELM_VERSION) -> Path: + filename_directory = Path(tempfile.gettempdir()) / "helm" / version + filename_path = filename_directory / "helm" + + if not filename_directory.is_dir(): + filename_directory.mkdir(parents=True) + + if not filename_path.is_file(): + logger.info( + "downloading and extracting Helm binary version %s to path=%s", + constants.HELM_VERSION, + filename_path, + ) + old_path = os.environ.get("PATH") + new_path = f"{filename_directory}:{old_path}" + install_script = subprocess.run( + [ + "curl", + "-s", + "https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3", + ], + stdout=subprocess.PIPE, + check=True, + ) + subprocess.run( + [ + "bash", + "-s", + "--", + "-v", + constants.HELM_VERSION, + "--no-sudo", + ], + input=install_script.stdout, + check=True, + env={"HELM_INSTALL_DIR": str(filename_directory), "PATH": new_path}, + ) + + filename_path.chmod(0o555) + return filename_path + + +def run_helm_subprocess(processargs, **kwargs) -> None: + helm_path = download_helm_binary() + logger.info("helm at %s", helm_path) + if run_subprocess_cmd([helm_path] + processargs, **kwargs): + raise HelmException("Helm returned an error") + + +def version() -> str: + helm_path = download_helm_binary() + logger.info("checking helm=%s version", helm_path) + + version_output = subprocess.check_output([helm_path, "version"]).decode("utf-8") + return version_output diff --git a/src/_nebari/provider/kubernetes.py 
b/src/_nebari/provider/kubernetes.py new file mode 100644 index 0000000000..a39fc2709d --- /dev/null +++ b/src/_nebari/provider/kubernetes.py @@ -0,0 +1,396 @@ +# Copyright 2019 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import os +import pathlib +import re + +import yaml +from kubernetes import client +from kubernetes.dynamic.client import DynamicClient +from kubernetes.dynamic.resource import Resource + +UPPER_FOLLOWED_BY_LOWER_RE = re.compile("(.)([A-Z][a-z]+)") +LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE = re.compile("([a-z0-9])([A-Z])") + + +def create_from_directory( + k8s_client, yaml_dir=None, verbose=False, namespace="default", apply=False, **kwargs +): + """ + Perform an action from files from a directory. Pass True for verbose to + print confirmation information. + + Input: + k8s_client: an ApiClient object, initialized with the client args. + yaml_dir: string. Contains the path to directory. + verbose: If True, print confirmation from the create action. + Default is False. + namespace: string. Contains the namespace to create all + resources inside. The namespace must preexist otherwise + the resource creation will fail. If the API object in + the yaml file already contains a namespace definition + this parameter has no effect. + + Available parameters for creating : + :param async_req bool + :param bool include_uninitialized: If true, partially initialized + resources are included in the response. 
+ :param str pretty: If 'true', then the output is pretty printed. + :param str dry_run: When present, indicates that modifications + should not be persisted. An invalid or unrecognized dryRun + directive will result in an error response and no further + processing of the request. + Valid values are: - All: all dry run stages will be processed + + Returns: + The list containing the created kubernetes API objects. + + Raises: + FailToCreateError which holds list of `client.rest.ApiException` + instances for each object that failed to create. + """ + + if not yaml_dir: + raise ValueError("`yaml_dir` argument must be provided") + elif not os.path.isdir(yaml_dir): # noqa + raise ValueError("`yaml_dir` argument must be a path to directory") + + files = [ + os.path.join(yaml_dir, i) # noqa + for i in os.listdir(yaml_dir) + if os.path.isfile(os.path.join(yaml_dir, i)) # noqa + ] + if not files: + raise ValueError("`yaml_dir` contains no files") + + failures = [] + k8s_objects_all = [] + + for file in files: + try: + k8s_objects = create_from_yaml( + k8s_client, + file, + verbose=verbose, + namespace=namespace, + apply=apply, + **kwargs, + ) + k8s_objects_all.append(k8s_objects) + except OperationFailureError as failure: + failures.extend(failure.api_exceptions) + if failures: + raise OperationFailureError(failures) + return k8s_objects_all + + +def create_from_yaml( + k8s_client, + yaml_file=None, + yaml_objects=None, + verbose=False, + namespace="default", + apply=False, + **kwargs, +): + """ + Perform an action from a yaml file. Pass True for verbose to + print confirmation information. + Input: + yaml_file: string. Contains the path to yaml file. + k8s_client: an ApiClient object, initialized with the client args. + yaml_objects: List[dict]. Optional list of YAML objects; used instead + of reading the `yaml_file`. Default is None. + verbose: If True, print confirmation from the create action. + Default is False. + namespace: string. 
Contains the namespace to create all + resources inside. The namespace must preexist otherwise + the resource creation will fail. If the API object in + the yaml file already contains a namespace definition + this parameter has no effect. + + Available parameters for creating : + :param async_req bool + :param bool include_uninitialized: If true, partially initialized + resources are included in the response. + :param str pretty: If 'true', then the output is pretty printed. + :param str dry_run: When present, indicates that modifications + should not be persisted. An invalid or unrecognized dryRun + directive will result in an error response and no further + processing of the request. + Valid values are: - All: all dry run stages will be processed + + Returns: + The created kubernetes API objects. + + Raises: + FailToCreateError which holds list of `client.rest.ApiException` + instances for each object that failed to create. + """ + + def create_with(objects, apply=apply): + failures = [] + k8s_objects = [] + for yml_document in objects: + if yml_document is None: + continue + try: + created = create_from_dict( + k8s_client, + yml_document, + verbose, + namespace=namespace, + apply=apply, + **kwargs, + ) + k8s_objects.append(created) + except OperationFailureError as failure: + failures.extend(failure.api_exceptions) + if failures: + raise OperationFailureError(failures) + return k8s_objects + + class Loader(yaml.loader.SafeLoader): + yaml_implicit_resolvers = yaml.loader.SafeLoader.yaml_implicit_resolvers.copy() + if "=" in yaml_implicit_resolvers: + yaml_implicit_resolvers.pop("=") + + if yaml_objects: + yml_document_all = yaml_objects + return create_with(yml_document_all) + elif yaml_file: + with open(os.path.abspath(yaml_file)) as f: # noqa + yml_document_all = yaml.load_all(f, Loader=Loader) + return create_with(yml_document_all, apply) + else: + raise ValueError( + "One of `yaml_file` or `yaml_objects` arguments must be provided" + ) + + +def 
create_from_dict( + k8s_client, data, verbose=False, namespace="default", apply=False, **kwargs +): + """ + Perform an action from a dictionary containing valid kubernetes + API object (i.e. List, Service, etc). + + Input: + k8s_client: an ApiClient object, initialized with the client args. + data: a dictionary holding valid kubernetes objects + verbose: If True, print confirmation from the create action. + Default is False. + namespace: string. Contains the namespace to create all + resources inside. The namespace must preexist otherwise + the resource creation will fail. If the API object in + the yaml file already contains a namespace definition + this parameter has no effect. + + Returns: + The created kubernetes API objects. + + Raises: + FailToCreateError which holds list of `client.rest.ApiException` + instances for each object that failed to create. + """ + # If it is a list type, will need to iterate its items + api_exceptions = [] + k8s_objects = [] + + if "List" in data["kind"]: + # Could be "List" or "Pod/Service/...List" + # This is a list type. iterate within its items + kind = data["kind"].replace("List", "") + for yml_object in data["items"]: + # Mitigate cases when server returns a xxxList object + # See kubernetes-client/python#586 + if kind != "": + yml_object["apiVersion"] = data["apiVersion"] + yml_object["kind"] = kind + try: + created = create_from_yaml_single_item( + k8s_client, + yml_object, + verbose, + namespace=namespace, + apply=apply, + **kwargs, + ) + k8s_objects.append(created) + except client.rest.ApiException as api_exception: + api_exceptions.append(api_exception) + else: + # This is a single object. 
Call the single item method + try: + created = create_from_yaml_single_item( + k8s_client, data, verbose, namespace=namespace, apply=apply, **kwargs + ) + k8s_objects.append(created) + except client.rest.ApiException as api_exception: + api_exceptions.append(api_exception) + + # In case we have exceptions waiting for us, raise them + if api_exceptions: + raise OperationFailureError(api_exceptions) + + return k8s_objects + + +def create_from_yaml_single_item( + k8s_client, yml_object, verbose=False, apply=False, **kwargs +): + kind = yml_object["kind"] + if apply: + apply_client = DynamicClient(k8s_client).resources.get( + api_version=yml_object["apiVersion"], kind=kind + ) + resp = apply_client.server_side_apply( + body=yml_object, field_manager="python-client", **kwargs + ) + return resp + group, _, version = yml_object["apiVersion"].partition("/") + if version == "": + version = group + group = "core" + # Take care for the case e.g. api_type is "apiextensions.k8s.io" + # Only replace the last instance + group = "".join(group.rsplit(".k8s.io", 1)) + # convert group name from DNS subdomain format to + # python class name convention + group = "".join(word.capitalize() for word in group.split(".")) + fcn_to_call = "{0}{1}Api".format(group, version.capitalize()) + k8s_api = getattr(client, fcn_to_call)(k8s_client) + # Replace CamelCased action_type into snake_case + kind = UPPER_FOLLOWED_BY_LOWER_RE.sub(r"\1_\2", kind) + kind = LOWER_OR_NUM_FOLLOWED_BY_UPPER_RE.sub(r"\1_\2", kind).lower() + # Expect the user to create namespaced objects more often + if hasattr(k8s_api, "create_namespaced_{0}".format(kind)): + # Decide which namespace we are going to put the object in, + # if any + if "namespace" in yml_object["metadata"]: + namespace = yml_object["metadata"]["namespace"] + kwargs["namespace"] = namespace + resp = getattr(k8s_api, "create_namespaced_{0}".format(kind))( + body=yml_object, **kwargs + ) + else: + kwargs.pop("namespace", None) + resp = getattr(k8s_api, 
"create_{0}".format(kind))(body=yml_object, **kwargs) + if verbose: + msg = "{0} created.".format(kind) + if hasattr(resp, "status"): + msg += " status='{0}'".format(str(resp.status)) + print(msg) + return resp + + +def delete_from_yaml( + k8s_client: client.ApiClient, yaml_file: pathlib.Path = None, verbose: bool = False +) -> None: + """ + Delete all objects in a yaml file. Pass True for verbose to + print confirmation information. + Input: + yaml_file: string. Contains the path to yaml file. + k8s_client: an ApiClient object, initialized with the client args. + + Returns: + None + + Raises: + OperationFailureError which holds list of `client.rest.ApiException` + instances for each object that failed to delete. + """ + dynamic_client = DynamicClient(k8s_client) + k8s_objects = parse_yaml_file(yaml_file) + exceptions = [] + for object in k8s_objects: + try: + if verbose: + print(f"Deleting {object.kind} {object.name}") + if object.namespaced: + dynamic_client.resources.get( + api_version=object.api_version, kind=object.kind + ).delete( + name=object.name, + namespace=object.extra_args.get("namespace", "default"), + ) + else: + dynamic_client.resources.get( + api_version=object.api_version, kind=object.kind + ).delete(name=object.name) + except client.rest.ApiException as api_exception: + if api_exception.reason == "Not Found": + continue + if verbose: + print(f"Failed to delete {object.kind} {object.name}") + exceptions.append(api_exception) + except Exception as e: + print(f"Warning, failed to delete {object.kind} {object.name}: {e}") + if exceptions: + raise OperationFailureError(exceptions) + + +def parse_yaml_file(yaml_file: pathlib.Path) -> list: + """ + Parse a yaml file and return a list of dictionaries. + Input: + yaml_file: pathlib.Path. Contains the path to yaml file. + + Returns: + A list of kubernetes objects in the yaml file. 
+ """ + + class Loader(yaml.loader.SafeLoader): + yaml_implicit_resolvers = yaml.loader.SafeLoader.yaml_implicit_resolvers.copy() + if "=" in yaml_implicit_resolvers: + yaml_implicit_resolvers.pop("=") + + with open(yaml_file.absolute()) as f: # noqa + yml_document_all = yaml.load_all(f, Loader=Loader) + + objects = [] + for doc in yml_document_all: + object = Resource( + api_version=doc["apiVersion"], + prefix=doc["apiVersion"].split("/")[0], + kind=doc["kind"], + namespaced=True if "namespace" in doc["metadata"] else False, + name=doc["metadata"]["name"], + body=doc, + namespace=doc["metadata"].get("namespace", None), + annotations=doc["metadata"].get("annotations", None), + ) + objects.append(object) + return objects + + +class OperationFailureError(Exception): + """ + An exception class for handling error if an error occurred when + handling a yaml file. + """ + + def __init__(self, api_exceptions): + self.api_exceptions = api_exceptions + + def __str__(self): + msg = "" + for api_exception in self.api_exceptions: + msg += "Error from server ({0}): {1}".format( + api_exception.reason, api_exception.body + ) + return msg diff --git a/src/_nebari/provider/kustomize.py b/src/_nebari/provider/kustomize.py new file mode 100644 index 0000000000..a93ac950ad --- /dev/null +++ b/src/_nebari/provider/kustomize.py @@ -0,0 +1,65 @@ +import logging +import subprocess +import tempfile +from pathlib import Path + +from _nebari import constants +from _nebari.utils import run_subprocess_cmd + +logger = logging.getLogger(__name__) + + +class KustomizeException(Exception): + pass + + +def download_kustomize_binary(version=constants.KUSTOMIZE_VERSION) -> Path: + filename_directory = Path(tempfile.gettempdir()) / "kustomize" / version + filename_path = filename_directory / "kustomize" + + if not filename_directory.is_dir(): + filename_directory.mkdir(parents=True) + + if not filename_path.is_file(): + logger.info( + "downloading and extracting kustomize binary version %s to 
path=%s", + constants.KUSTOMIZE_VERSION, + filename_path, + ) + install_script = subprocess.run( + [ + "curl", + "-s", + "https://raw.githubusercontent.com/kubernetes-sigs/kustomize/master/hack/install_kustomize.sh", + ], + stdout=subprocess.PIPE, + check=True, + ) + subprocess.run( + ["bash", "-s", constants.KUSTOMIZE_VERSION, str(filename_directory)], + input=install_script.stdout, + check=True, + ) + + filename_path.chmod(0o555) + return filename_path + + +def run_kustomize_subprocess(processargs, **kwargs) -> None: + kustomize_path = download_kustomize_binary() + try: + run_subprocess_cmd( + [kustomize_path] + processargs, capture_output=True, **kwargs + ) + except subprocess.CalledProcessError as e: + raise KustomizeException("Kustomize returned an error: %s" % e.stderr) + + +def version() -> str: + kustomize_path = download_kustomize_binary() + logger.info("checking kustomize=%s version", kustomize_path) + + version_output = subprocess.check_output([kustomize_path, "version"]).decode( + "utf-8" + ) + return version_output diff --git a/src/_nebari/provider/terraform.py b/src/_nebari/provider/terraform.py index 6f6ad6930b..59d88e76dd 100644 --- a/src/_nebari/provider/terraform.py +++ b/src/_nebari/provider/terraform.py @@ -114,8 +114,10 @@ def download_terraform_binary(version=constants.TERRAFORM_VERSION): def run_terraform_subprocess(processargs, **kwargs): terraform_path = download_terraform_binary() logger.info(f" terraform at {terraform_path}") - if run_subprocess_cmd([terraform_path] + processargs, **kwargs): + exit_code, output = run_subprocess_cmd([terraform_path] + processargs, **kwargs) + if exit_code != 0: raise TerraformException("Terraform returned an error") + return output def version(): @@ -183,6 +185,29 @@ def tfimport(addr, id, directory=None, var_files=None, exist_ok=False): raise e +def show(directory=None, terraform_init: bool = True) -> dict: + + if terraform_init: + init(directory) + + logger.info(f"terraform show directory={directory}") + 
command = ["show", "-json"] + with timer(logger, "terraform show"): + try: + output = json.loads( + run_terraform_subprocess( + command, + cwd=directory, + prefix="terraform", + strip_errors=True, + capture_output=True, + ) + ) + return output + except TerraformException as e: + raise e + + def refresh(directory=None, var_files=None): var_files = var_files or [] diff --git a/src/_nebari/stages/base.py b/src/_nebari/stages/base.py index 60a4821a24..cef1322e95 100644 --- a/src/_nebari/stages/base.py +++ b/src/_nebari/stages/base.py @@ -2,12 +2,234 @@ import inspect import os import pathlib +import shutil +import sys +import tempfile from typing import Any, Dict, List, Tuple -from _nebari.provider import terraform +from jinja2 import Environment, FileSystemLoader +from kubernetes import client, config +from kubernetes.client.rest import ApiException + +from _nebari.provider import helm, kubernetes, kustomize, terraform from _nebari.stages.tf_objects import NebariTerraformState from nebari.hookspecs import NebariStage +KUSTOMIZATION_TEMPLATE = "kustomization.yaml.tmpl" + + +class NebariKustomizeStage(NebariStage): + @property + def template_directory(self): + return pathlib.Path(inspect.getfile(self.__class__)).parent / "template" + + @property + def stage_prefix(self): + return pathlib.Path("stages") / self.name + + @property + def kustomize_vars(self): + return {} + + failed_to_create = False + error_message = "" + + def _get_k8s_client(self, stage_outputs: Dict[str, Dict[str, Any]]): + try: + config.load_kube_config( + config_file=stage_outputs["stages/02-infrastructure"][ + "kubeconfig_filename" + ]["value"] + ) + api_instance = client.ApiClient() + except ApiException: + print( + f"ERROR: After stage={self.name} " + "unable to connect to kubernetes cluster" + ) + sys.exit(1) + return api_instance + + def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): + return {} + + def set_outputs( + self, stage_outputs: Dict[str, Dict[str, Any]], outputs: Dict[str, 
Any] + ): + stage_key = "stages/" + self.name + if stage_key not in stage_outputs: + stage_outputs[stage_key] = {**outputs} + else: + stage_outputs[stage_key].update(outputs) + + def check( + self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False + ): + + if self.failed_to_create: + print( + f"ERROR: After stage={self.name} " + f"failed to create kubernetes resources" + f"with error: {self.error_message}" + ) + sys.exit(1) + + def render(self) -> Dict[pathlib.Path, str]: + env = Environment(loader=FileSystemLoader(self.template_directory)) + + contents = {} + if not (self.template_directory / KUSTOMIZATION_TEMPLATE).exists(): + raise FileNotFoundError( + f"ERROR: After stage={self.name} " + f"{KUSTOMIZATION_TEMPLATE} template file not found in template directory" + ) + kustomize_template = env.get_template(KUSTOMIZATION_TEMPLATE) + rendered_kustomization = kustomize_template.render(**self.kustomize_vars) + with open(self.template_directory / "kustomization.yaml", "w") as f: + f.write(rendered_kustomization) + + with tempfile.TemporaryDirectory() as temp_dir: + kustomize.run_kustomize_subprocess( + [ + "build", + "-o", + f"{temp_dir}", + "--enable-helm", + "--helm-command", + f"{helm.download_helm_binary()}", + f"{self.template_directory}", + ] + ) + + # copy crds from the template directory to the temp directory + crds = self.template_directory.glob("charts/*/*/crds/*.yaml") + for crd in crds: + with crd.open("rb") as f: + contents[ + pathlib.Path( + self.stage_prefix, + "crds", + crd.name, + ) + ] = f.read() + + for root, _, filenames in os.walk(temp_dir): + for filename in filenames: + root_filename = pathlib.Path(root) / filename + with root_filename.open("rb") as f: + contents[ + pathlib.Path( + self.stage_prefix, + "manifests", + pathlib.Path.relative_to( + pathlib.Path(root_filename), temp_dir + ), + ) + ] = f.read() + # cleanup generated kustomization.yaml + pathlib.Path(self.template_directory, "kustomization.yaml").unlink() + + # 
clean up downloaded helm charts + charts_dir = pathlib.Path(self.template_directory, "charts") + if charts_dir.exists(): + shutil.rmtree(charts_dir) + + return contents + + # implement the deploy method by taking all of the kubernetes manifests + # from the manifests sub folder and applying them to the kubernetes + # cluster using the kubernetes python client in order + @contextlib.contextmanager + def deploy( + self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False + ): + + print(f"Deploying kubernetes resources for {self.name}") + # get the kubernetes client + kubernetes_client = self._get_k8s_client(stage_outputs) + + # get the path to the manifests folder + directory = pathlib.Path(self.output_directory, self.stage_prefix) + + # get the list of all the files in the crds folder + crds = directory.glob("crds/*.yaml") + + # get the list of all the files in the manifests folder + manifests = directory.glob("manifests/*.yaml") + + # apply each crd to the kubernetes cluster in alphabetical order + for crd in sorted(crds): + print(f"CRD: {crd}") + try: + kubernetes.create_from_yaml(kubernetes_client, crd, apply=True) + except ApiException as e: + self.failed_to_create = True + self.error_message = str(e) + print(f"Applied CRD: {crd}") + + # apply each manifest to the kubernetes cluster in alphabetical order + for manifest in sorted(manifests): + print(f"manifest: {manifest}") + try: + kubernetes.create_from_yaml( + kubernetes_client, + manifest, + namespace=self.config.namespace, + apply=True, + ) + except ApiException as e: + self.failed_to_create = True + self.error_message = str(e) + print(f"Applied manifest: {manifest}") + yield + + @contextlib.contextmanager + def destroy( + self, + stage_outputs: Dict[str, Dict[str, Any]], + status: Dict[str, bool], + ignore_errors: bool = True, + ): + # destroy each manifest in the reverse order + print(f"Destroying kubernetes resources for {self.name}") + + # get the kubernetes client + kubernetes_client 
= self._get_k8s_client(stage_outputs) + + # get the path to the manifests folder + directory = pathlib.Path(self.output_directory, self.stage_prefix) + + # get the list of all the files in the crds folder + crds = directory.glob("crds/*.yaml") + + # get the list of all the files in the manifests folder + manifests = directory.glob("manifests/*.yaml") + + # destroy each manifest in the reverse order + + for manifest in sorted(manifests, reverse=True): + + print(f"Destroyed manifest: {manifest}") + try: + kubernetes.delete_from_yaml(kubernetes_client, manifest) + except ApiException as e: + self.error_message = str(e) + if not ignore_errors: + raise e + + # destroy each crd in the reverse order + + for crd in sorted(crds, reverse=True): + + print(f"Destroyed CRD: {crd}") + try: + kubernetes.delete_from_yaml(kubernetes_client, crd) + except ApiException as e: + self.error_message = str(e) + if not ignore_errors: + raise e + yield + class NebariTerraformStage(NebariStage): @property @@ -58,11 +280,15 @@ def set_outputs( @contextlib.contextmanager def deploy( - self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False + self, + stage_outputs: Dict[str, Dict[str, Any]], + disable_prompt: bool = False, + terraform_init: bool = True, ): deploy_config = dict( directory=str(self.output_directory / self.stage_prefix), input_vars=self.input_vars(stage_outputs), + terraform_init=terraform_init, ) state_imports = self.state_imports() if state_imports: diff --git a/src/_nebari/stages/infrastructure/__init__.py b/src/_nebari/stages/infrastructure/__init__.py index 8b188a720b..026f33fe82 100644 --- a/src/_nebari/stages/infrastructure/__init__.py +++ b/src/_nebari/stages/infrastructure/__init__.py @@ -1,11 +1,12 @@ import contextlib +import enum import inspect import os import pathlib import re import sys import tempfile -from typing import Annotated, Any, Dict, List, Optional, Tuple, Type, Union +from typing import Annotated, Any, Dict, List, Literal, Optional, 
Tuple, Type, Union from pydantic import Field, field_validator, model_validator @@ -18,6 +19,7 @@ google_cloud, ) from _nebari.stages.base import NebariTerraformStage +from _nebari.stages.kubernetes_services import SharedFsEnum from _nebari.stages.tf_objects import NebariTerraformState from _nebari.utils import ( AZURE_NODE_RESOURCE_GROUP_SUFFIX, @@ -73,6 +75,16 @@ class GCPPrivateClusterConfig(schema.Base): master_ipv4_cidr_block: str +@schema.yaml_object(schema.yaml) +class GCPNodeGroupImageTypeEnum(str, enum.Enum): + UBUNTU_CONTAINERD = "UBUNTU_CONTAINERD" + COS_CONTAINERD = "COS_CONTAINERD" + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_str(node.value) + + class GCPInputVars(schema.Base): name: str environment: str @@ -90,6 +102,7 @@ class GCPInputVars(schema.Base): ip_allocation_policy: Optional[Dict[str, str]] = None master_authorized_networks_config: Optional[Dict[str, str]] = None private_cluster_config: Optional[GCPPrivateClusterConfig] = None + node_group_image_type: GCPNodeGroupImageTypeEnum = None class AzureNodeGroupInputVars(schema.Base): @@ -115,6 +128,17 @@ class AzureInputVars(schema.Base): workload_identity_enabled: bool = False +class AWSAmiTypes(enum.Enum): + AL2_x86_64 = "AL2_x86_64" + AL2_x86_64_GPU = "AL2_x86_64_GPU" + CUSTOM = "CUSTOM" + + +class AWSNodeLaunchTemplate(schema.Base): + pre_bootstrap_command: Optional[str] = None + ami_id: Optional[str] = None + + class AWSNodeGroupInputVars(schema.Base): name: str instance_type: str @@ -124,6 +148,28 @@ class AWSNodeGroupInputVars(schema.Base): max_size: int single_subnet: bool permissions_boundary: Optional[str] = None + ami_type: Optional[AWSAmiTypes] = None + launch_template: Optional[AWSNodeLaunchTemplate] = None + + @field_validator("ami_type", mode="before") + @classmethod + def _infer_and_validate_ami_type(cls, value, values) -> str: + gpu_enabled = values.get("gpu", False) + + # Auto-set ami_type if not provided + if not value: + if 
values.get("launch_template") and values["launch_template"].ami_id: + return "CUSTOM" + if gpu_enabled: + return "AL2_x86_64_GPU" + return "AL2_x86_64" + + # Explicit validation + if value == "AL2_x86_64" and gpu_enabled: + raise ValueError( + "ami_type 'AL2_x86_64' cannot be used with GPU enabled (gpu=True)." + ) + return value class AWSInputVars(schema.Base): @@ -133,12 +179,16 @@ class AWSInputVars(schema.Base): existing_subnet_ids: Optional[List[str]] = None region: str kubernetes_version: str + eks_endpoint_access: Optional[ + Literal["private", "public", "public_and_private"] + ] = "public" node_groups: List[AWSNodeGroupInputVars] availability_zones: List[str] vpc_cidr_block: str permissions_boundary: Optional[str] = None kubeconfig_filename: str = get_kubeconfig_filename() tags: Dict[str, str] = {} + efs_enabled: bool def _calculate_asg_node_group_map(config: schema.Main): @@ -174,7 +224,7 @@ def _calculate_node_groups(config: schema.Main): for group in ["general", "user", "worker"] } elif config.provider == schema.ProviderEnum.existing: - return config.existing.node_selectors + return config.existing.model_dump()["node_selectors"] else: return config.local.model_dump()["node_selectors"] @@ -315,7 +365,7 @@ class GCPNodeGroup(schema.Base): DEFAULT_GCP_NODE_GROUPS = { - "general": GCPNodeGroup(instance="e2-highmem-4", min_nodes=1, max_nodes=1), + "general": GCPNodeGroup(instance="e2-standard-8", min_nodes=1, max_nodes=1), "user": GCPNodeGroup(instance="e2-standard-4", min_nodes=0, max_nodes=5), "worker": GCPNodeGroup(instance="e2-standard-4", min_nodes=0, max_nodes=5), } @@ -339,11 +389,10 @@ class GoogleCloudPlatformProvider(schema.Base): @model_validator(mode="before") @classmethod def _check_input(cls, data: Any) -> Any: - google_cloud.check_credentials() - avaliable_regions = google_cloud.regions() - if data["region"] not in avaliable_regions: + available_regions = google_cloud.regions() + if data["region"] not in available_regions: raise ValueError( - 
f"Google Cloud region={data['region']} is not one of {avaliable_regions}" + f"Google Cloud region={data['region']} is not one of {available_regions}" ) available_kubernetes_versions = google_cloud.kubernetes_versions(data["region"]) @@ -433,6 +482,7 @@ class AWSNodeGroup(schema.Base): gpu: bool = False single_subnet: bool = False permissions_boundary: Optional[str] = None + launch_template: Optional[AWSNodeLaunchTemplate] = None DEFAULT_AWS_NODE_GROUPS = { @@ -451,6 +501,9 @@ class AmazonWebServicesProvider(schema.Base): kubernetes_version: str availability_zones: Optional[List[str]] node_groups: Dict[str, AWSNodeGroup] = DEFAULT_AWS_NODE_GROUPS + eks_endpoint_access: Optional[ + Literal["private", "public", "public_and_private"] + ] = "public" existing_subnet_ids: Optional[List[str]] = None existing_security_group_id: Optional[str] = None vpc_cidr_block: str = "10.10.0.0/16" @@ -506,6 +559,7 @@ def _check_input(cls, data: Any) -> Any: raise ValueError( f"Amazon Web Services instance {node_group.instance} not one of available instance types={available_instances}" ) + return data @@ -584,16 +638,16 @@ def check_provider(cls, data: Any) -> Any: f"'{provider}' is not a valid enumeration member; permitted: local, existing, do, aws, gcp, azure" ) else: - setted_providers = [ + set_providers = [ provider for provider in provider_name_abbreviation_map.keys() if provider in data ] - num_providers = len(setted_providers) + num_providers = len(set_providers) if num_providers > 1: - raise ValueError(f"Multiple providers set: {setted_providers}") + raise ValueError(f"Multiple providers set: {set_providers}") elif num_providers == 1: - data["provider"] = provider_name_abbreviation_map[setted_providers[0]] + data["provider"] = provider_name_abbreviation_map[set_providers[0]] elif num_providers == 0: data["provider"] = schema.ProviderEnum.local.value return data @@ -606,7 +660,7 @@ class NodeSelectorKeyValue(schema.Base): class KubernetesCredentials(schema.Base): host: str - 
cluster_ca_certifiate: str + cluster_ca_certificate: str token: Optional[str] = None username: Optional[str] = None password: Optional[str] = None @@ -752,6 +806,11 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): ip_allocation_policy=self.config.google_cloud_platform.ip_allocation_policy, master_authorized_networks_config=self.config.google_cloud_platform.master_authorized_networks_config, private_cluster_config=self.config.google_cloud_platform.private_cluster_config, + node_group_image_type=( + GCPNodeGroupImageTypeEnum.UBUNTU_CONTAINERD + if self.config.storage.type == SharedFsEnum.cephfs + else GCPNodeGroupImageTypeEnum.COS_CONTAINERD + ), ).model_dump() elif self.config.provider == schema.ProviderEnum.azure: return AzureInputVars( @@ -789,6 +848,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): return AWSInputVars( name=self.config.escaped_project_name, environment=self.config.namespace, + eks_endpoint_access=self.config.amazon_web_services.eks_endpoint_access, existing_subnet_ids=self.config.amazon_web_services.existing_subnet_ids, existing_security_group_id=self.config.amazon_web_services.existing_security_group_id, region=self.config.amazon_web_services.region, @@ -803,6 +863,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): max_size=node_group.max_nodes, single_subnet=node_group.single_subnet, permissions_boundary=node_group.permissions_boundary, + launch_template=node_group.launch_template, ) for name, node_group in self.config.amazon_web_services.node_groups.items() ], @@ -810,6 +871,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): vpc_cidr_block=self.config.amazon_web_services.vpc_cidr_block, permissions_boundary=self.config.amazon_web_services.permissions_boundary, tags=self.config.amazon_web_services.tags, + efs_enabled=self.config.storage.type == SharedFsEnum.efs, ).model_dump() else: raise ValueError(f"Unknown provider: {self.config.provider}") diff --git 
a/src/_nebari/stages/infrastructure/template/aws/main.tf b/src/_nebari/stages/infrastructure/template/aws/main.tf index 2c78018f0b..feffd35291 100644 --- a/src/_nebari/stages/infrastructure/template/aws/main.tf +++ b/src/_nebari/stages/infrastructure/template/aws/main.tf @@ -64,6 +64,7 @@ module "registry-jupyterlab" { # ====================== EFS ========================= module "efs" { + count = var.efs_enabled ? 1 : 0 source = "./modules/efs" name = "${local.cluster_name}-jupyterhub-shared" @@ -73,6 +74,10 @@ module "efs" { efs_security_groups = [local.security_group_id] } +moved { + from = module.efs + to = module.efs[0] +} # ==================== KUBERNETES ===================== module "kubernetes" { @@ -92,7 +97,8 @@ module "kubernetes" { node_groups = var.node_groups - endpoint_private_access = var.eks_endpoint_private_access + endpoint_public_access = var.eks_endpoint_access == "private" ? false : true + endpoint_private_access = var.eks_endpoint_access == "public" ? false : true public_access_cidrs = var.eks_public_access_cidrs permissions_boundary = var.permissions_boundary } diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kafka/main.tf deleted file mode 100644 index 877a5955be..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/main.tf +++ /dev/null @@ -1,29 +0,0 @@ -resource "aws_kms_key" "main" { - description = var.name - - tags = merge({ Name = var.name }, var.tags) -} - -resource "aws_msk_cluster" "main" { - cluster_name = var.name - kafka_version = var.kafka_version - number_of_broker_nodes = var.kafka_instance_count - - broker_node_group_info { - instance_type = var.kafka_instance_type - ebs_volume_size = var.kafka_ebs_volume_size - client_subnets = var.kafka_vpc_subnets - security_groups = var.kafka_security_groups - } - - encryption_info { - encryption_at_rest_kms_key_arn = aws_kms_key.main.arn - - encryption_in_transit { - 
client_broker = "TLS" - in_cluster = true - } - } - - tags = merge({ Name = var.name }, var.tags) -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/outputs.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kafka/outputs.tf deleted file mode 100644 index 0b2189a934..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/outputs.tf +++ /dev/null @@ -1,8 +0,0 @@ -output "credentials" { - description = "Important credentials for connecting to MSK cluster" - value = { - zookeeper_host = aws_msk_cluster.main.zookeeper_connect_string - bootstrap_brokers = aws_msk_cluster.main.bootstrap_brokers - bootstrap_brokers_tls = aws_msk_cluster.main.bootstrap_brokers_tls - } -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/variables.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kafka/variables.tf deleted file mode 100644 index 14d3f8b4a3..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/kafka/variables.tf +++ /dev/null @@ -1,44 +0,0 @@ -variable "name" { - description = "Name to give kafka cluster" - type = string -} - -variable "tags" { - description = "Tags for kafka cluster" - type = map(string) - default = {} -} - -variable "kafka_version" { - description = "Kafka server version" - type = string - default = "2.3.1" -} - -variable "kafka_instance_count" { - description = "Number of nodes to run Kafka cluster on" - type = number - default = 2 -} - -variable "kafka_instance_type" { - description = "AWS Instance type to run Kafka cluster on" - type = string - default = "kafka.m5.large" -} - -variable "kafka_ebs_volume_size" { - description = "AWS EBS volume size (GB) to use for Kafka broker storage" - type = number - default = 100 -} - -variable "kafka_vpc_subnets" { - description = "Kafka VPC subnets to run cluster on" - type = list(string) -} - -variable "kafka_security_groups" { - description = "Kafka security groups to run cluster on" - type = 
list(string) -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/files/user_data.tftpl b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/files/user_data.tftpl new file mode 100644 index 0000000000..278e9a6270 --- /dev/null +++ b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/files/user_data.tftpl @@ -0,0 +1,20 @@ +MIME-Version: 1.0 +Content-Type: multipart/mixed; boundary="//" + +%{ if node_pre_bootstrap_command != null } +--// +Content-Type: text/x-shellscript; charset="us-ascii" + +${node_pre_bootstrap_command} +%{ endif } + +%{ if include_bootstrap_cmd } +--// +Content-Type: text/x-shellscript; charset="us-ascii" +#!/bin/bash +set -ex + +/etc/eks/bootstrap.sh ${cluster_name} --b64-cluster-ca ${cluster_cert_authority} --apiserver-endpoint ${cluster_endpoint} +%{ endif } + + --// diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf index 43e5538507..5b66201f83 100644 --- a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf +++ b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/main.tf @@ -8,7 +8,8 @@ resource "aws_eks_cluster" "main" { vpc_config { security_group_ids = var.cluster_security_groups subnet_ids = var.cluster_subnets - + #trivy:ignore:AVD-AWS-0040 + endpoint_public_access = var.endpoint_public_access endpoint_private_access = var.endpoint_private_access public_access_cidrs = var.public_access_cidrs } @@ -20,6 +21,52 @@ resource "aws_eks_cluster" "main" { tags = merge({ Name = var.name }, var.tags) } +## aws_launch_template user_data invocation +## If using a Custom AMI, then the /etc/eks/bootstrap cmds and args must be included/modified, +## otherwise, on default AWS EKS Node AMI, the bootstrap cmd is appended automatically +resource "aws_launch_template" "main" { + for_each = { + for node_group in var.node_groups : + node_group.name => 
node_group + if node_group.launch_template != null + } + + name_prefix = "eks-${var.name}-${each.value.name}-" + image_id = each.value.launch_template.ami_id + + vpc_security_group_ids = var.cluster_security_groups + + + metadata_options { + http_tokens = "required" + http_endpoint = "enabled" + instance_metadata_tags = "enabled" + } + + block_device_mappings { + device_name = "/dev/xvda" + ebs { + volume_size = 50 + volume_type = "gp2" + } + } + + # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-basics + user_data = base64encode( + templatefile( + "${path.module}/files/user_data.tftpl", + { + node_pre_bootstrap_command = each.value.launch_template.pre_bootstrap_command + # This will ensure the bootstrap user data is used to join the node + include_bootstrap_cmd = each.value.launch_template.ami_id != null ? true : false + cluster_name = aws_eks_cluster.main.name + cluster_cert_authority = aws_eks_cluster.main.certificate_authority[0].data + cluster_endpoint = aws_eks_cluster.main.endpoint + } + ) + ) +} + resource "aws_eks_node_group" "main" { count = length(var.node_groups) @@ -30,8 +77,8 @@ resource "aws_eks_node_group" "main" { subnet_ids = var.node_groups[count.index].single_subnet ? [element(var.cluster_subnets, 0)] : var.cluster_subnets instance_types = [var.node_groups[count.index].instance_type] - ami_type = var.node_groups[count.index].gpu == true ? "AL2_x86_64_GPU" : "AL2_x86_64" - disk_size = 50 + ami_type = var.node_groups[count.index].ami_type + disk_size = var.node_groups[count.index].launch_template == null ? 50 : null scaling_config { min_size = var.node_groups[count.index].min_size @@ -39,6 +86,15 @@ resource "aws_eks_node_group" "main" { max_size = var.node_groups[count.index].max_size } + # Only set launch_template if its node_group counterpart parameter is not null + dynamic "launch_template" { + for_each = var.node_groups[count.index].launch_template != null ? 
[0] : [] + content { + id = aws_launch_template.main[var.node_groups[count.index].name].id + version = aws_launch_template.main[var.node_groups[count.index].name].latest_version + } + } + labels = { "dedicated" = var.node_groups[count.index].name } @@ -89,8 +145,11 @@ resource "aws_eks_addon" "aws-ebs-csi-driver" { } resource "aws_eks_addon" "coredns" { - addon_name = "coredns" - cluster_name = aws_eks_cluster.main.name + addon_name = "coredns" + cluster_name = aws_eks_cluster.main.name + resolve_conflicts_on_create = "OVERWRITE" + resolve_conflicts_on_update = "OVERWRITE" + configuration_values = jsonencode({ nodeSelector = { diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/variables.tf b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/variables.tf index e22c640929..4d38d10a19 100644 --- a/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/variables.tf +++ b/src/_nebari/stages/infrastructure/template/aws/modules/kubernetes/variables.tf @@ -44,13 +44,15 @@ variable "node_group_additional_policies" { variable "node_groups" { description = "Node groups to add to EKS Cluster" type = list(object({ - name = string - instance_type = string - gpu = bool - min_size = number - desired_size = number - max_size = number - single_subnet = bool + name = string + instance_type = string + gpu = bool + min_size = number + desired_size = number + max_size = number + single_subnet = bool + launch_template = map(any) + ami_type = string })) } @@ -60,6 +62,11 @@ variable "node_group_instance_type" { default = "m5.large" } +variable "endpoint_public_access" { + type = bool + default = true +} + variable "endpoint_private_access" { type = bool default = false diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/permissions/main.tf deleted file mode 100644 index ac88990c40..0000000000 --- 
a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/main.tf +++ /dev/null @@ -1,36 +0,0 @@ -resource "aws_iam_user" "main" { - name = var.name - - tags = merge({ Name = var.name }, var.tags) -} - -resource "aws_iam_access_key" "main" { - user = aws_iam_user.main.name -} - -data "aws_iam_policy_document" "main" { - depends_on = [ - aws_iam_user.main, - aws_iam_access_key.main - ] - - statement { - sid = "1" - - effect = "Allow" - - actions = var.allowed_policy_actions - resources = var.allowed_policy_resources - } -} - -resource "aws_iam_policy" "main" { - name = var.name - path = "/" - policy = data.aws_iam_policy_document.main.json -} - -resource "aws_iam_user_policy_attachment" "main" { - user = aws_iam_user.main.name - policy_arn = aws_iam_policy.main.arn -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/outputs.tf b/src/_nebari/stages/infrastructure/template/aws/modules/permissions/outputs.tf deleted file mode 100644 index 9153e6223f..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/outputs.tf +++ /dev/null @@ -1,11 +0,0 @@ -output "credentials" { - description = "Information about specific AWS IAM user" - value = { - user_arn = aws_iam_user.main.arn, - username = aws_iam_user.main.name, - access_key = aws_iam_access_key.main.id, - secret_key = aws_iam_access_key.main.secret - allowed_policies = var.allowed_policy_actions, - allowed_resources = var.allowed_policy_resources - } -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/variables.tf b/src/_nebari/stages/infrastructure/template/aws/modules/permissions/variables.tf deleted file mode 100644 index 16e3d65bf8..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/permissions/variables.tf +++ /dev/null @@ -1,22 +0,0 @@ -variable "name" { - description = "Prefix name to use to annotate permission resources" - type = string -} - -variable "tags" { - description = "AWS iam 
additional tags" - type = map(string) - default = {} -} - -variable "allowed_policy_actions" { - description = "Actions to allow IAM user to perform" - type = list(string) - default = [] -} - -variable "allowed_policy_resources" { - description = "Allowed AWS arns for user to have access to" - type = list(string) - default = [] -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/rds/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/rds/main.tf deleted file mode 100644 index e50beb2b1b..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/rds/main.tf +++ /dev/null @@ -1,39 +0,0 @@ -resource "aws_rds_cluster" "main" { - cluster_identifier = var.name - - engine = var.rds_database_engine - - database_name = var.database_master.database - master_username = var.database_master.username - master_password = var.database_master.password - - backup_retention_period = 5 - preferred_backup_window = "07:00-09:00" - skip_final_snapshot = true - iam_database_authentication_enabled = true - - # NOTE - this should be removed when not in dev mode to reduce risk - # of downtime - apply_immediately = true - - tags = merge({ - Name = var.name - Description = "RDS database for ${var.name}-rds-cluster" - }, var.tags) -} - -resource "aws_rds_cluster_instance" "main" { - count = 1 - identifier = "${var.name}-cluster-instance-${count.index}" - - cluster_identifier = aws_rds_cluster.main.id - instance_class = var.rds_instance_type - publicly_accessible = true - - engine = var.rds_database_engine - - tags = merge({ - Name = "${var.name}-cluster-instance-${count.index}" - Description = "RDS database for ${var.name}-rds-cluster instances" - }, var.tags) -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/rds/outputs.tf b/src/_nebari/stages/infrastructure/template/aws/modules/rds/outputs.tf deleted file mode 100644 index fb8c1db95a..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/rds/outputs.tf +++ 
/dev/null @@ -1,15 +0,0 @@ -output "credentials" { - description = "connection string for master database connection" - value = { - arn = aws_rds_cluster.main.arn - username = aws_rds_cluster.main.master_username - password = aws_rds_cluster.main.master_password - database = aws_rds_cluster.main.database_name - host = aws_rds_cluster.main.endpoint - port = aws_rds_cluster.main.port - } -} - -# output "aws_postgresql_user_connections" { -# description = "Database connections and iam users for each database" -# } diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/rds/users.tf b/src/_nebari/stages/infrastructure/template/aws/modules/rds/users.tf deleted file mode 100644 index d93c4263e0..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/rds/users.tf +++ /dev/null @@ -1,51 +0,0 @@ -# # ======================================================= -# # AWS RDS + IAM Policy Setup -# # ======================================================= - -# resource "aws_iam_user" "psql_user" { -# count = length(var.postgresql_additional_users) - -# name = "${var.name}-psql" -# } - -# resource "aws_iam_access_key" "psql_user" { -# user = aws_iam_user.psql_user.name -# } - -# output "psql_user_secret" { -# description = "PSQL User Access Keys" -# value = aws_iam_access_key.psql_user.encrypted_secret -# } - -# data "aws_iam_policy_document" "psql" { -# depends_on = [ -# aws_rds_cluster.postgresql -# ] - -# statement { -# sid = "1" - -# effect = "Allow" - -# actions = [ -# "rds-db:connect" -# ] - -# # should username be included with arn? var.postgresql_user? 
-# resources = concat( -# [ aws_rds_cluster.postgresql.arn ], -# aws_rds_cluster_instance.postgresql[*].arn -# ) -# } -# } - -# resource "aws_iam_policy" "psql" { -# name = "${var.name}-psql" -# path = "/" -# policy = data.aws_iam_policy_document.psql.json -# } - -# resource "aws_iam_user_policy_attachment" "psql_attach" { -# user = aws_iam_user.psql_user.name -# policy_arn = aws_iam_policy.psql.arn -# } diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/rds/variables.tf b/src/_nebari/stages/infrastructure/template/aws/modules/rds/variables.tf deleted file mode 100644 index e49b8ba822..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/rds/variables.tf +++ /dev/null @@ -1,37 +0,0 @@ -variable "name" { - description = "Prefix name to assign to AWS RDS postgresql database" - type = string -} - -variable "tags" { - description = "Additional tags to assign to AWS RDS postgresql database" - type = map(string) - default = {} -} - -variable "rds_instance_type" { - description = "AWS Instance type for postgresql instance" - type = string - default = "db.r4.large" -} - -variable "rds_number_instances" { - description = "AWS number of rds database instances" - type = number - default = 1 -} - -variable "rds_database_engine" { - description = "aurora-postgresql" - type = string - default = "aurora-postgresql" -} - -variable "database_master" { - description = "AWS RDS master" - type = object({ - username = string - password = string - database = string - }) -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/s3/main.tf b/src/_nebari/stages/infrastructure/template/aws/modules/s3/main.tf deleted file mode 100644 index 6f349fbf16..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/s3/main.tf +++ /dev/null @@ -1,13 +0,0 @@ -resource "aws_s3_bucket" "main" { - bucket = var.name - acl = var.public ? 
"public-read" : "private" - - versioning { - enabled = true - } - - tags = merge({ - Name = var.name - Description = "S3 bucket for ${var.name}" - }, var.tags) -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/s3/outputs.tf b/src/_nebari/stages/infrastructure/template/aws/modules/s3/outputs.tf deleted file mode 100644 index 11f9f58930..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/s3/outputs.tf +++ /dev/null @@ -1,8 +0,0 @@ -output "credentials" { - description = "Important credentials for connecting to S3 bucket" - value = { - bucket = aws_s3_bucket.main.bucket - bucket_domain_name = aws_s3_bucket.main.bucket_domain_name - arn = aws_s3_bucket.main.arn - } -} diff --git a/src/_nebari/stages/infrastructure/template/aws/modules/s3/variables.tf b/src/_nebari/stages/infrastructure/template/aws/modules/s3/variables.tf deleted file mode 100644 index df14c5b830..0000000000 --- a/src/_nebari/stages/infrastructure/template/aws/modules/s3/variables.tf +++ /dev/null @@ -1,16 +0,0 @@ -variable "name" { - description = "Prefix name for bucket resource" - type = string -} - -variable "tags" { - description = "Additional tags to include with AWS S3 bucket" - type = map(string) - default = {} -} - -variable "public" { - description = "AWS s3 bucket is exposed publicly" - type = bool - default = false -} diff --git a/src/_nebari/stages/infrastructure/template/aws/outputs.tf b/src/_nebari/stages/infrastructure/template/aws/outputs.tf index 08581fe6e2..9c11139498 100644 --- a/src/_nebari/stages/infrastructure/template/aws/outputs.tf +++ b/src/_nebari/stages/infrastructure/template/aws/outputs.tf @@ -22,7 +22,7 @@ output "kubeconfig_filename" { output "nfs_endpoint" { description = "Endpoint for nfs server" - value = module.efs.credentials.dns_name + value = length(module.efs) == 1 ? 
module.efs[0].credentials.dns_name : null } output "cluster_oidc_issuer_url" { diff --git a/src/_nebari/stages/infrastructure/template/aws/variables.tf b/src/_nebari/stages/infrastructure/template/aws/variables.tf index c07c8f60f2..a3f37b9eb9 100644 --- a/src/_nebari/stages/infrastructure/template/aws/variables.tf +++ b/src/_nebari/stages/infrastructure/template/aws/variables.tf @@ -31,13 +31,15 @@ variable "kubernetes_version" { variable "node_groups" { description = "AWS node groups" type = list(object({ - name = string - instance_type = string - gpu = bool - min_size = number - desired_size = number - max_size = number - single_subnet = bool + name = string + instance_type = string + gpu = bool + min_size = number + desired_size = number + max_size = number + single_subnet = bool + launch_template = map(any) + ami_type = string })) } @@ -47,7 +49,7 @@ variable "availability_zones" { } variable "vpc_cidr_block" { - description = "VPC cidr block for infastructure" + description = "VPC cidr block for infrastructure" type = string } @@ -56,6 +58,12 @@ variable "kubeconfig_filename" { type = string } +variable "eks_endpoint_access" { + description = "EKS cluster api server endpoint access setting" + type = string + default = "public" +} + variable "eks_endpoint_private_access" { type = bool default = false @@ -77,3 +85,8 @@ variable "tags" { type = map(string) default = {} } + +variable "efs_enabled" { + description = "Enable EFS" + type = bool +} diff --git a/src/_nebari/stages/infrastructure/template/azure/modules/kubernetes/main.tf b/src/_nebari/stages/infrastructure/template/azure/modules/kubernetes/main.tf index cd39488309..f093f048c6 100644 --- a/src/_nebari/stages/infrastructure/template/azure/modules/kubernetes/main.tf +++ b/src/_nebari/stages/infrastructure/template/azure/modules/kubernetes/main.tf @@ -31,13 +31,12 @@ resource "azurerm_kubernetes_cluster" "main" { default_node_pool { vnet_subnet_id = var.vnet_subnet_id name = var.node_groups[0].name - 
node_count = 1 vm_size = var.node_groups[0].instance_type enable_auto_scaling = "true" - min_count = 1 - max_count = 1 + min_count = var.node_groups[0].min_size + max_count = var.node_groups[0].max_size max_pods = var.max_pods - # node_labels = var.node_labels + orchestrator_version = var.kubernetes_version node_labels = { "azure-node-pool" = var.node_groups[0].name @@ -54,39 +53,30 @@ resource "azurerm_kubernetes_cluster" "main" { type = "SystemAssigned" # "UserAssigned" or "SystemAssigned". SystemAssigned identity lifecycles are tied to the AKS Cluster. } + lifecycle { + ignore_changes = [ + # We ignore changes since otherwise, the AKS cluster unsets this default value every time you deploy. + # https://github.com/hashicorp/terraform-provider-azurerm/issues/24020#issuecomment-1887670287 + default_node_pool[0].upgrade_settings, + ] + } + } # https://registry.terraform.io/providers/hashicorp/azurerm/latest/docs/resources/kubernetes_cluster_node_pool -resource "azurerm_kubernetes_cluster_node_pool" "user_node_group" { - name = var.node_groups[1].name - kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - vm_size = var.node_groups[1].instance_type - node_count = 0 - enable_auto_scaling = "true" - mode = "User" # "System" or "User", only "User" nodes can scale down to 0 - min_count = var.node_groups[1].min_size - max_count = var.node_groups[1].max_size - max_pods = var.max_pods - node_labels = { - "azure-node-pool" = var.node_groups[1].name - } - orchestrator_version = var.kubernetes_version - tags = var.tags - vnet_subnet_id = var.vnet_subnet_id -} +resource "azurerm_kubernetes_cluster_node_pool" "node_group" { + for_each = { for i, group in var.node_groups : i => group if i != 0 } -resource "azurerm_kubernetes_cluster_node_pool" "worker_node_group" { - name = var.node_groups[2].name + name = each.value.name kubernetes_cluster_id = azurerm_kubernetes_cluster.main.id - vm_size = var.node_groups[2].instance_type - node_count = 0 + vm_size = 
each.value.instance_type enable_auto_scaling = "true" mode = "User" # "System" or "User", only "User" nodes can scale down to 0 - min_count = var.node_groups[2].min_size - max_count = var.node_groups[2].max_size + min_count = each.value.min_size + max_count = each.value.max_size max_pods = var.max_pods node_labels = { - "azure-node-pool" = var.node_groups[2].name + "azure-node-pool" = each.value.name } orchestrator_version = var.kubernetes_version tags = var.tags diff --git a/src/_nebari/stages/infrastructure/template/gcp/main.tf b/src/_nebari/stages/infrastructure/template/gcp/main.tf index 4411cac406..3d23af5571 100644 --- a/src/_nebari/stages/infrastructure/template/gcp/main.tf +++ b/src/_nebari/stages/infrastructure/template/gcp/main.tf @@ -36,4 +36,5 @@ module "kubernetes" { release_channel = var.release_channel tags = var.tags labels = var.labels + node_group_image_type = var.node_group_image_type } diff --git a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/main.tf b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/main.tf index c4b18f32ad..57e8d9fc88 100644 --- a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/main.tf +++ b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/main.tf @@ -87,6 +87,7 @@ resource "google_container_node_pool" "main" { node_config { preemptible = local.merged_node_groups[count.index].preemptible machine_type = local.merged_node_groups[count.index].instance_type + image_type = var.node_group_image_type service_account = google_service_account.main.email diff --git a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/outputs.tf b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/outputs.tf index bfb5463295..513294aac7 100644 --- a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/outputs.tf +++ b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/outputs.tf @@ -1,5 +1,5 @@ output "credentials" { - 
description = "Credentials required for connecting to kubernets cluster" + description = "Credentials required for connecting to kubernetes cluster" sensitive = true value = { endpoint = "https://${google_container_cluster.main.endpoint}" diff --git a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/variables.tf b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/variables.tf index cef5363030..2ee2d78ed5 100644 --- a/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/variables.tf +++ b/src/_nebari/stages/infrastructure/template/gcp/modules/kubernetes/variables.tf @@ -156,3 +156,16 @@ variable "labels" { type = map(string) default = {} } + +variable "node_group_image_type" { + description = "The image type to use for the node groups" + type = string + default = null + + validation { + # Only 2 values are valid according to docs + # https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#image_type + condition = var.node_group_image_type == null || contains(["COS_CONTAINERD", "UBUNTU_CONTAINERD"], var.node_group_image_type) + error_message = "Allowed values for input_parameter are \"COS_CONTAINERD\" or \"UBUNTU_CONTAINERD\"." 
+ } +} diff --git a/src/_nebari/stages/infrastructure/template/gcp/variables.tf b/src/_nebari/stages/infrastructure/template/gcp/variables.tf index a0de29d0b9..5a280e59c9 100644 --- a/src/_nebari/stages/infrastructure/template/gcp/variables.tf +++ b/src/_nebari/stages/infrastructure/template/gcp/variables.tf @@ -99,3 +99,16 @@ variable "private_cluster_config" { master_ipv4_cidr_block = string })) } + +variable "node_group_image_type" { + description = "The image type to use for the node groups" + type = string + default = null + + validation { + # Only 2 values are valid according to docs + # https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/container_cluster#image_type + condition = var.node_group_image_type == null || contains(["COS_CONTAINERD", "UBUNTU_CONTAINERD"], var.node_group_image_type) + error_message = "Allowed values for input_parameter are \"COS_CONTAINERD\" or \"UBUNTU_CONTAINERD\"." + } +} diff --git a/src/_nebari/stages/kubernetes_ingress/__init__.py b/src/_nebari/stages/kubernetes_ingress/__init__.py index 628d383830..ea5f8fa335 100644 --- a/src/_nebari/stages/kubernetes_ingress/__init__.py +++ b/src/_nebari/stages/kubernetes_ingress/__init__.py @@ -93,17 +93,18 @@ def _attempt_dns_lookup( attempt = 0 while not _attempt_dns_lookup(domain_name, ip): - sleeptime = 60 * (2**attempt) - if not disable_prompt: + if disable_prompt: + sleeptime = 60 * (2**attempt) + print(f"Will attempt to poll DNS again in {sleeptime} seconds...") + time.sleep(sleeptime) + else: input( f"After attempting to poll the DNS, the record for domain={domain_name} appears not to exist, " f"has recently been updated, or has yet to fully propagate. This non-deterministic behavior is likely due to " - f"DNS caching and will likely resolve itself in a few minutes.\n\n\tTo poll the DNS again in {sleeptime} seconds " - f"[Press Enter].\n\n...otherwise kill the process and run the deployment again later..." 
+ f"DNS caching and will likely resolve itself in a few minutes.\n\n\tTo poll the DNS again [Press Enter].\n\n" + f"...otherwise kill the process and run the deployment again later..." ) - print(f"Will attempt to poll DNS again in {sleeptime} seconds...") - time.sleep(sleeptime) attempt += 1 if attempt == 5: print( diff --git a/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml b/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml index abe7d4d3e3..bf356145f0 100644 --- a/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml +++ b/src/_nebari/stages/kubernetes_keycloak/template/modules/kubernetes/keycloak-helm/values.yaml @@ -27,14 +27,18 @@ extraInitContainers: | - sh - -c - | - wget https://github.com/aerogear/keycloak-metrics-spi/releases/download/2.5.3/keycloak-metrics-spi-2.5.3.jar -P /data/ && - export SHA256SUM=9b3f52f842a66dadf5ff3cc3a729b8e49042d32f84510a5d73d41a2e39f29a96 && - if ! (echo "$SHA256SUM /data/keycloak-metrics-spi-2.5.3.jar" | sha256sum -c) - then - echo "Error: Checksum not verified" && exit 1 - else - chown 1000:1000 /data/keycloak-metrics-spi-2.5.3.jar && - chmod 777 /data/keycloak-metrics-spi-2.5.3.jar + if [ ! -f /data/keycloak-metrics-spi-2.5.3.jar ]; then + wget https://github.com/aerogear/keycloak-metrics-spi/releases/download/2.5.3/keycloak-metrics-spi-2.5.3.jar -P /data/ && + export SHA256SUM=9b3f52f842a66dadf5ff3cc3a729b8e49042d32f84510a5d73d41a2e39f29a96 && + if ! (echo "$SHA256SUM /data/keycloak-metrics-spi-2.5.3.jar" | sha256sum -c) + then + echo "Error: Checksum not verified" && exit 1 + else + chown 1000:1000 /data/keycloak-metrics-spi-2.5.3.jar && + chmod 777 /data/keycloak-metrics-spi-2.5.3.jar + fi + else + echo "File /data/keycloak-metrics-spi-2.5.3.jar already exists. Skipping download." 
fi image: busybox:1.36 name: initialize-spi-metrics-jar diff --git a/src/_nebari/stages/kubernetes_kuberhealthy/__init__.py b/src/_nebari/stages/kubernetes_kuberhealthy/__init__.py new file mode 100644 index 0000000000..af3a178e2e --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy/__init__.py @@ -0,0 +1,45 @@ +import contextlib +from typing import Any, Dict, List, Type + +from _nebari.stages.base import NebariKustomizeStage +from nebari import schema +from nebari.hookspecs import NebariStage, hookimpl + + +class InputSchema(schema.Base): + pass + + +class OutputSchema(schema.Base): + pass + + +class KuberHealthyStage(NebariKustomizeStage): + name = "10-kubernetes-kuberhealthy" + priority = 100 + + input_schema = InputSchema + output_schema = OutputSchema + + @property + def kustomize_vars(self): + return { + "namespace": self.config.namespace, + "kuberhealthy_helm_version": self.config.monitoring.healthchecks.kuberhealthy_helm_version, + } + + @contextlib.contextmanager + def deploy( + self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False + ): + if self.config.monitoring.healthchecks.enabled: + with super().deploy(stage_outputs, disable_prompt): + yield + else: + with self.destroy(stage_outputs, {}): + yield + + +@hookimpl +def nebari_stage() -> List[Type[NebariStage]]: + return [KuberHealthyStage] diff --git a/src/_nebari/stages/kubernetes_kuberhealthy/template/kustomization.yaml.tmpl b/src/_nebari/stages/kubernetes_kuberhealthy/template/kustomization.yaml.tmpl new file mode 100644 index 0000000000..204b4a95ec --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy/template/kustomization.yaml.tmpl @@ -0,0 +1,15 @@ +# kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + +helmCharts: +- name: kuberhealthy + repo: https://kuberhealthy.github.io/kuberhealthy/helm-repos + releaseName: kuberhealthy + namespace: {{ namespace }} + version: "{{ kuberhealthy_helm_version }}" + valuesFile: 
values.yaml + valuesInline: + prometheus: + serviceMonitor: + namespace: {{ namespace }} diff --git a/src/_nebari/stages/kubernetes_kuberhealthy/template/values.yaml b/src/_nebari/stages/kubernetes_kuberhealthy/template/values.yaml new file mode 100644 index 0000000000..abea2f2851 --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy/template/values.yaml @@ -0,0 +1,4 @@ +prometheus: + enabled: true + serviceMonitor: + enabled: true diff --git a/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/__init__.py b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/__init__.py new file mode 100644 index 0000000000..3756c88eda --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/__init__.py @@ -0,0 +1,44 @@ +import contextlib +from typing import Any, Dict, List, Type + +from _nebari.stages.base import NebariKustomizeStage +from nebari import schema +from nebari.hookspecs import NebariStage, hookimpl + + +class InputSchema(schema.Base): + pass + + +class OutputSchema(schema.Base): + pass + + +class KuberHealthyStage(NebariKustomizeStage): + name = "11-kubernetes-kuberhealthy-healthchecks" + priority = 110 + + input_schema = InputSchema + output_schema = OutputSchema + + @property + def kustomize_vars(self): + return { + "namespace": self.config.namespace, + } + + @contextlib.contextmanager + def deploy( + self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False + ): + if self.config.monitoring.healthchecks.enabled: + with super().deploy(stage_outputs, disable_prompt): + yield + else: + with self.destroy(stage_outputs, {}): + yield + + +@hookimpl +def nebari_stage() -> List[Type[NebariStage]]: + return [KuberHealthyStage] diff --git a/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/conda-store-healthcheck.yaml b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/conda-store-healthcheck.yaml new file mode 100644 index 0000000000..37bcc854c7 --- /dev/null +++ 
b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/conda-store-healthcheck.yaml @@ -0,0 +1,30 @@ +apiVersion: comcast.github.io/v1 +kind: KuberhealthyCheck +metadata: + name: conda-store-http-check + namespace: dev +spec: + runInterval: 5m + timeout: 10m + podSpec: + containers: + - name: https + image: kuberhealthy/http-check:v1.5.0 + imagePullPolicy: IfNotPresent + env: + - name: CHECK_URL + value: "http://nebari-conda-store-server.dev:5000" + - name: COUNT #### default: "0" + value: "5" + - name: SECONDS #### default: "0" + value: "1" + - name: PASSING_PERCENT #### default: "100" + value: "80" + resources: + requests: + cpu: 15m + memory: 15Mi + limits: + cpu: 25m + restartPolicy: Always + terminationGracePeriodSeconds: 5 diff --git a/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/jupyterhub-healthcheck.yaml b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/jupyterhub-healthcheck.yaml new file mode 100644 index 0000000000..bb7ad2e05f --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/jupyterhub-healthcheck.yaml @@ -0,0 +1,30 @@ +apiVersion: comcast.github.io/v1 +kind: KuberhealthyCheck +metadata: + name: jupyterhub-http-check + namespace: dev +spec: + runInterval: 5m + timeout: 10m + podSpec: + containers: + - name: https + image: kuberhealthy/http-check:v1.5.0 + imagePullPolicy: IfNotPresent + env: + - name: CHECK_URL + value: "http://hub.dev:8081" + - name: COUNT #### default: "0" + value: "5" + - name: SECONDS #### default: "0" + value: "1" + - name: PASSING_PERCENT #### default: "100" + value: "80" + resources: + requests: + cpu: 15m + memory: 15Mi + limits: + cpu: 25m + restartPolicy: Always + terminationGracePeriodSeconds: 5 diff --git a/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/keycloak-healthcheck.yaml b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/keycloak-healthcheck.yaml new file mode 100644 
index 0000000000..8b1f847aea --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/base/keycloak-healthcheck.yaml @@ -0,0 +1,30 @@ +apiVersion: comcast.github.io/v1 +kind: KuberhealthyCheck +metadata: + name: keycloak-http-check + namespace: dev +spec: + runInterval: 5m + timeout: 10m + podSpec: + containers: + - name: https + image: kuberhealthy/http-check:v1.5.0 + imagePullPolicy: IfNotPresent + env: + - name: CHECK_URL + value: "http://keycloak-http.dev" + - name: COUNT #### default: "0" + value: "5" + - name: SECONDS #### default: "0" + value: "1" + - name: PASSING_PERCENT #### default: "100" + value: "80" + resources: + requests: + cpu: 15m + memory: 15Mi + limits: + cpu: 25m + restartPolicy: Always + terminationGracePeriodSeconds: 5 diff --git a/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/kustomization.yaml.tmpl b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/kustomization.yaml.tmpl new file mode 100644 index 0000000000..06e8f4aaae --- /dev/null +++ b/src/_nebari/stages/kubernetes_kuberhealthy_healthchecks/template/kustomization.yaml.tmpl @@ -0,0 +1,17 @@ +# kustomization.yaml +apiVersion: kustomize.config.k8s.io/v1beta1 +kind: Kustomization + + +resources: +- ./base/conda-store-healthcheck.yaml +- ./base/jupyterhub-healthcheck.yaml +- ./base/keycloak-healthcheck.yaml + +patches: + - target: + kind: KuberhealthyCheck + patch: |- + - op: replace + path: /metadata/namespace + value: "{{ namespace }}" diff --git a/src/_nebari/stages/kubernetes_services/__init__.py b/src/_nebari/stages/kubernetes_services/__init__.py index fae8955de1..bd4dfd759e 100644 --- a/src/_nebari/stages/kubernetes_services/__init__.py +++ b/src/_nebari/stages/kubernetes_services/__init__.py @@ -6,6 +6,7 @@ from urllib.parse import urlencode from pydantic import ConfigDict, Field, field_validator, model_validator +from typing_extensions import Self from _nebari import constants from _nebari.stages.base import 
NebariTerraformStage @@ -14,7 +15,11 @@ NebariKubernetesProvider, NebariTerraformState, ) -from _nebari.utils import set_docker_image_tag, set_nebari_dask_version +from _nebari.utils import ( + byte_unit_conversion, + set_docker_image_tag, + set_nebari_dask_version, +) from _nebari.version import __version__ from nebari import schema from nebari.hookspecs import NebariStage, hookimpl @@ -38,6 +43,17 @@ def to_yaml(cls, representer, node): return representer.represent_str(node.value) +@schema.yaml_object(schema.yaml) +class SharedFsEnum(str, enum.Enum): + nfs = "nfs" + cephfs = "cephfs" + efs = "efs" + + @classmethod + def to_yaml(cls, representer, node): + return representer.represent_str(node.value) + + class DefaultImages(schema.Base): jupyterhub: str = f"quay.io/nebari/nebari-jupyterhub:{set_docker_image_tag()}" jupyterlab: str = f"quay.io/nebari/nebari-jupyterlab:{set_docker_image_tag()}" @@ -45,6 +61,10 @@ class DefaultImages(schema.Base): class Storage(schema.Base): + type: SharedFsEnum = Field( + default=None, + json_schema_extra={"immutable": True}, + ) conda_store: str = "200Gi" shared_filesystem: str = "200Gi" @@ -206,10 +226,16 @@ class MonitoringOverrides(schema.Base): minio: Dict = {} +class Healthchecks(schema.Base): + enabled: bool = False + kuberhealthy_helm_version: str = constants.KUBERHEALTHY_HELM_VERSION + + class Monitoring(schema.Base): enabled: bool = True overrides: MonitoringOverrides = MonitoringOverrides() minio_enabled: bool = True + healthchecks: Healthchecks = Healthchecks() class JupyterLabPioneer(schema.Base): @@ -235,13 +261,37 @@ class IdleCuller(schema.Base): server_shutdown_no_activity_timeout: int = 15 +class JupyterLabGalleryExhibit(schema.Base): + git: str + title: str + homepage: Optional[str] = None + description: Optional[str] = None + icon: Optional[str] = None + account: Optional[str] = None + token: Optional[str] = None + branch: Optional[str] = None + depth: Optional[int] = None + + +class 
JupyterLabGallerySettings(schema.Base): + title: str = "Examples" + destination: str = "examples" + exhibits: List[JupyterLabGalleryExhibit] = [] + hide_gallery_without_exhibits: bool = True + + class JupyterLab(schema.Base): default_settings: Dict[str, Any] = {} + gallery_settings: JupyterLabGallerySettings = JupyterLabGallerySettings() idle_culler: IdleCuller = IdleCuller() initial_repositories: List[Dict[str, str]] = [] preferred_dir: Optional[str] = None +class RookCeph(schema.Base): + storage_class_name: None | str = None + + class InputSchema(schema.Base): default_images: DefaultImages = DefaultImages() storage: Storage = Storage() @@ -316,6 +366,35 @@ class InputSchema(schema.Base): jupyterhub: JupyterHub = JupyterHub() jupyterlab: JupyterLab = JupyterLab() jhub_apps: JHubApps = JHubApps() + ceph: RookCeph = RookCeph() + + def _set_storage_type_default_value(self): + if self.storage.type is None: + if self.provider == schema.ProviderEnum.aws: + self.storage.type = SharedFsEnum.efs + else: + self.storage.type = SharedFsEnum.nfs + + @model_validator(mode="after") + def custom_validation(self) -> Self: + self._set_storage_type_default_value() + + if ( + self.storage.type == SharedFsEnum.cephfs + and self.provider == schema.ProviderEnum.local + ): + raise ValueError( + f'storage.type: "{self.storage.type.value}" is not supported for provider: "{self.provider.value}"' + ) + + if ( + self.storage.type == SharedFsEnum.efs + and self.provider != schema.ProviderEnum.aws + ): + raise ValueError( + f'storage.type: "{self.storage.type.value}" is only supported for provider: "{schema.ProviderEnum.aws.value}"' + ) + return self class OutputSchema(schema.Base): @@ -344,12 +423,18 @@ class ImageNameTag(schema.Base): tag: str +class RookCephInputVars(schema.Base): + rook_ceph_storage_class_name: None | str = None + + class CondaStoreInputVars(schema.Base): conda_store_environments: Dict[str, CondaEnvironment] = Field( alias="conda-store-environments" ) 
conda_store_default_namespace: str = Field(alias="conda-store-default-namespace") - conda_store_filesystem_storage: str = Field(alias="conda-store-filesystem-storage") + conda_store_filesystem_storage: float = Field( + alias="conda-store-filesystem-storage" + ) conda_store_object_storage: str = Field(alias="conda-store-object-storage") conda_store_extra_settings: Dict[str, Any] = Field( alias="conda-store-extra-settings" @@ -361,6 +446,11 @@ class CondaStoreInputVars(schema.Base): alias="conda-store-service-token-scopes" ) + @field_validator("conda_store_filesystem_storage", mode="before") + @classmethod + def handle_units(cls, value: Optional[str]) -> float: + return byte_unit_conversion(value, "GiB") + class JupyterhubInputVars(schema.Base): jupyterhub_theme: Dict[str, Any] = Field(alias="jupyterhub-theme") @@ -368,9 +458,12 @@ class JupyterhubInputVars(schema.Base): jupyterlab_default_settings: Dict[str, Any] = Field( alias="jupyterlab-default-settings" ) + jupyterlab_gallery_settings: JupyterLabGallerySettings = Field( + alias="jupyterlab-gallery-settings" + ) initial_repositories: str = Field(alias="initial-repositories") jupyterhub_overrides: List[str] = Field(alias="jupyterhub-overrides") - jupyterhub_stared_storage: str = Field(alias="jupyterhub-shared-storage") + jupyterhub_shared_storage: float = Field(alias="jupyterhub-shared-storage") jupyterhub_shared_endpoint: Optional[str] = Field( alias="jupyterhub-shared-endpoint", default=None ) @@ -382,6 +475,12 @@ class JupyterhubInputVars(schema.Base): jhub_apps_enabled: bool = Field(alias="jhub-apps-enabled") cloud_provider: str = Field(alias="cloud-provider") jupyterlab_preferred_dir: Optional[str] = Field(alias="jupyterlab-preferred-dir") + shared_fs_type: SharedFsEnum + + @field_validator("jupyterhub_shared_storage", mode="before") + @classmethod + def handle_units(cls, value: Optional[str]) -> float: + return byte_unit_conversion(value, "GiB") class DaskGatewayInputVars(schema.Base): @@ -468,6 +567,12 @@ 
def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): "*/*": ["viewer"], }, }, + "conda-store-service-account": { + "primary_namespace": "", + "role_bindings": { + "*/*": ["admin"], + }, + }, } # Compound any logout URLs from extensions so they are are logged out in succession @@ -499,6 +604,8 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): ), ) + rook_ceph_vars = RookCephInputVars() + conda_store_vars = CondaStoreInputVars( conda_store_environments={ k: v.model_dump() for k, v in self.config.environments.items() @@ -518,7 +625,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): jupyterlab_image=_split_docker_image_name( self.config.default_images.jupyterlab ), - jupyterhub_stared_storage=self.config.storage.shared_filesystem, + jupyterhub_shared_storage=self.config.storage.shared_filesystem, jupyterhub_shared_endpoint=jupyterhub_shared_endpoint, cloud_provider=cloud_provider, jupyterhub_profiles=self.config.profiles.model_dump()["jupyterlab"], @@ -534,7 +641,14 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): jhub_apps_enabled=self.config.jhub_apps.enabled, initial_repositories=str(self.config.jupyterlab.initial_repositories), jupyterlab_default_settings=self.config.jupyterlab.default_settings, + jupyterlab_gallery_settings=self.config.jupyterlab.gallery_settings, jupyterlab_preferred_dir=self.config.jupyterlab.preferred_dir, + shared_fs_type=( + # efs is equivalent to nfs in these modules + SharedFsEnum.nfs + if self.config.storage.type == SharedFsEnum.efs + else self.config.storage.type + ), ) dask_gateway_vars = DaskGatewayInputVars( @@ -572,6 +686,7 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): return { **kubernetes_services_vars.model_dump(by_alias=True), + **rook_ceph_vars.model_dump(by_alias=True), **conda_store_vars.model_dump(by_alias=True), **jupyterhub_vars.model_dump(by_alias=True), **dask_gateway_vars.model_dump(by_alias=True), diff --git 
a/src/_nebari/stages/kubernetes_services/template/conda-store.tf b/src/_nebari/stages/kubernetes_services/template/conda-store.tf index 904a17e8df..c800f5b1e6 100644 --- a/src/_nebari/stages/kubernetes_services/template/conda-store.tf +++ b/src/_nebari/stages/kubernetes_services/template/conda-store.tf @@ -61,17 +61,19 @@ module "kubernetes-conda-store-server" { services = var.conda-store-service-token-scopes extra-settings = var.conda-store-extra-settings extra-config = var.conda-store-extra-config + conda-store-fs = var.shared_fs_type + + depends_on = [ + module.rook-ceph + ] } -module "conda-store-nfs-mount" { - source = "./modules/kubernetes/nfs-mount" +moved { + from = module.conda-store-nfs-mount + to = module.kubernetes-conda-store-server.module.conda-store-nfs-mount[0] +} - name = "conda-store" - namespace = var.environment - nfs_capacity = var.conda-store-filesystem-storage - nfs_endpoint = module.kubernetes-conda-store-server.endpoint_ip - depends_on = [ - module.kubernetes-conda-store-server - ] +locals { + conda-store-fs = var.shared_fs_type } diff --git a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf index fb2fdc71fc..a47acee8fa 100644 --- a/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf +++ b/src/_nebari/stages/kubernetes_services/template/dask_gateway.tf @@ -30,7 +30,7 @@ module "dask-gateway" { dask-etc-configmap-name = "dask-etc" # environments - conda-store-pvc = module.conda-store-nfs-mount.persistent_volume_claim.name + conda-store-pvc = module.kubernetes-conda-store-server.pvc conda-store-mount = "/home/conda" default-conda-store-namespace = var.conda-store-default-namespace conda-store-api-token = module.kubernetes-conda-store-server.service-tokens.dask-gateway @@ -42,4 +42,9 @@ module "dask-gateway" { cloud-provider = var.cloud-provider forwardauth_middleware_name = var.forwardauth_middleware_name + + depends_on = [ + 
module.kubernetes-nfs-server, + module.rook-ceph + ] } diff --git a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf index 4f8bebb9e4..17b6f12411 100644 --- a/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf +++ b/src/_nebari/stages/kubernetes_services/template/jupyterhub.tf @@ -19,7 +19,7 @@ variable "jupyterhub-overrides" { variable "jupyterhub-shared-storage" { description = "JupyterHub shared storage size [GB]" - type = string + type = number } variable "jupyterhub-shared-endpoint" { @@ -54,6 +54,26 @@ variable "jupyterlab-default-settings" { type = map(any) } +variable "jupyterlab-gallery-settings" { + description = "Server-side settings for jupyterlab-gallery extension" + type = object({ + title = optional(string) + destination = optional(string) + hide_gallery_without_exhibits = optional(bool) + exhibits = list(object({ + git = string + title = string + homepage = optional(string) + description = optional(string) + icon = optional(string) + account = optional(string) + token = optional(string) + branch = optional(string) + depth = optional(number) + })) + }) +} + variable "jupyterhub-hub-extraEnv" { description = "Extracted overrides to merge with jupyterhub.hub.extraEnv" type = string @@ -65,8 +85,28 @@ variable "idle-culler-settings" { type = any } +variable "shared_fs_type" { + type = string + description = "Use NFS or Ceph" + + validation { + condition = contains(["cephfs", "nfs"], var.shared_fs_type) + error_message = "Allowed values for input_parameter are \"cephfs\" or \"nfs\"." + } + +} + +locals { + jupyterhub-fs = var.shared_fs_type + jupyterhub-pvc-name = "jupyterhub-${var.environment}-share" + jupyterhub-pvc = local.jupyterhub-fs == "nfs" ? 
module.jupyterhub-nfs-mount[0].persistent_volume_claim.pvc : module.jupyterhub-cephfs-mount[0].persistent_volume_claim.pvc + enable-nfs-server = var.jupyterhub-shared-endpoint == null && (local.jupyterhub-fs == "nfs" || local.conda-store-fs == "nfs") +} + + + module "kubernetes-nfs-server" { - count = var.jupyterhub-shared-endpoint == null ? 1 : 0 + count = local.enable-nfs-server ? 1 : 0 source = "./modules/kubernetes/nfs-server" @@ -76,20 +116,43 @@ module "kubernetes-nfs-server" { node-group = var.node_groups.general } +moved { + from = module.jupyterhub-nfs-mount + to = module.jupyterhub-nfs-mount[0] +} module "jupyterhub-nfs-mount" { + count = local.jupyterhub-fs == "nfs" ? 1 : 0 source = "./modules/kubernetes/nfs-mount" name = "jupyterhub" namespace = var.environment nfs_capacity = var.jupyterhub-shared-storage nfs_endpoint = var.jupyterhub-shared-endpoint == null ? module.kubernetes-nfs-server.0.endpoint_ip : var.jupyterhub-shared-endpoint + nfs-pvc-name = local.jupyterhub-pvc-name depends_on = [ - module.kubernetes-nfs-server + module.kubernetes-nfs-server, + module.rook-ceph ] } +module "jupyterhub-cephfs-mount" { + count = local.jupyterhub-fs == "cephfs" ? 
1 : 0 + source = "./modules/kubernetes/cephfs-mount" + + name = "jupyterhub" + namespace = var.environment + fs_capacity = var.jupyterhub-shared-storage + ceph-pvc-name = local.jupyterhub-pvc-name + + depends_on = [ + module.kubernetes-nfs-server, + module.rook-ceph + ] +} + + module "jupyterhub" { source = "./modules/kubernetes/services/jupyterhub" @@ -104,11 +167,11 @@ module "jupyterhub" { overrides = var.jupyterhub-overrides - home-pvc = module.jupyterhub-nfs-mount.persistent_volume_claim.name + home-pvc = local.jupyterhub-pvc - shared-pvc = module.jupyterhub-nfs-mount.persistent_volume_claim.name + shared-pvc = local.jupyterhub-pvc - conda-store-pvc = module.conda-store-nfs-mount.persistent_volume_claim.name + conda-store-pvc = module.kubernetes-conda-store-server.pvc.name conda-store-mount = "/home/conda" conda-store-environments = var.conda-store-environments default-conda-store-namespace = var.conda-store-default-namespace @@ -149,8 +212,15 @@ module "jupyterhub" { jupyterlab-default-settings = var.jupyterlab-default-settings + jupyterlab-gallery-settings = var.jupyterlab-gallery-settings + jupyterlab-pioneer-enabled = var.jupyterlab-pioneer-enabled jupyterlab-pioneer-log-format = var.jupyterlab-pioneer-log-format jupyterlab-preferred-dir = var.jupyterlab-preferred-dir + + depends_on = [ + module.kubernetes-nfs-server, + module.rook-ceph, + ] } diff --git a/src/_nebari/stages/kubernetes_services/template/jupyterhub_ssh.tf b/src/_nebari/stages/kubernetes_services/template/jupyterhub_ssh.tf index ec49fc6749..dd1726c560 100644 --- a/src/_nebari/stages/kubernetes_services/template/jupyterhub_ssh.tf +++ b/src/_nebari/stages/kubernetes_services/template/jupyterhub_ssh.tf @@ -5,5 +5,10 @@ module "kubernetes-jupyterhub-ssh" { jupyterhub_api_url = module.jupyterhub.internal_jupyterhub_url node-group = var.node_groups.general - persistent_volume_claim = module.jupyterhub-nfs-mount.persistent_volume_claim.name + persistent_volume_claim = local.jupyterhub-pvc + + 
depends_on = [ + module.kubernetes-nfs-server, + module.rook-ceph + ] } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/main.tf new file mode 100644 index 0000000000..36471ed443 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/main.tf @@ -0,0 +1,21 @@ +resource "kubernetes_persistent_volume_claim" "main" { + metadata { + name = var.ceph-pvc-name + namespace = var.namespace + } + + spec { + access_modes = ["ReadWriteMany"] + storage_class_name = "ceph-filesystem-retain" # kubernetes_storage_class.main.metadata.0.name # Get this from a terraform output + resources { + requests = { + storage = "${var.fs_capacity}Gi" + } + } + } + + # Hack to avoid timeout while CephCluster is being created + timeouts { + create = "10m" + } +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/outputs.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/outputs.tf new file mode 100644 index 0000000000..a0e02b23d1 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/outputs.tf @@ -0,0 +1,11 @@ +output "persistent_volume_claim" { + description = "Name of persistent volume claim" + value = { + pvc = { + name = kubernetes_persistent_volume_claim.main.metadata.0.name + id = kubernetes_persistent_volume_claim.main.metadata.0.uid + } + namespace = var.namespace + kind = "persistentvolumeclaim" + } +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/variables.tf new file mode 100644 index 0000000000..f593c803aa --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/cephfs-mount/variables.tf @@ -0,0 +1,20 @@ +variable "name" 
{ + description = "Prefix name for ceph mount kubernetes resource" + type = string +} + +variable "namespace" { + description = "Namespace to deploy ceph storage mount" + type = string +} + +variable "fs_capacity" { + description = "Capacity of the CephFS mount in Gi" + type = number + default = 10 +} + +variable "ceph-pvc-name" { + description = "Name of the persistent volume claim" + type = string +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/main.tf index 4534be7b21..81ad1797e6 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/main.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/main.tf @@ -12,7 +12,7 @@ resource "kubernetes_persistent_volume" "main" { } spec { capacity = { - storage = var.nfs_capacity + storage = "${var.nfs_capacity}Gi" } storage_class_name = kubernetes_storage_class.main.metadata.0.name access_modes = ["ReadWriteMany"] @@ -28,7 +28,7 @@ resource "kubernetes_persistent_volume" "main" { resource "kubernetes_persistent_volume_claim" "main" { metadata { - name = "${var.name}-${var.namespace}-share" + name = var.nfs-pvc-name namespace = var.namespace } @@ -37,7 +37,7 @@ resource "kubernetes_persistent_volume_claim" "main" { storage_class_name = kubernetes_storage_class.main.metadata.0.name resources { requests = { - storage = var.nfs_capacity + storage = "${var.nfs_capacity}Gi" } } } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/outputs.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/outputs.tf index d5318cf5be..a0e02b23d1 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/outputs.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/outputs.tf @@ -1,7 +1,10 @@ output "persistent_volume_claim" {
description = "Name of persistent volume claim" value = { - name = kubernetes_persistent_volume_claim.main.metadata.0.name + pvc = { + name = kubernetes_persistent_volume_claim.main.metadata.0.name + id = kubernetes_persistent_volume_claim.main.metadata.0.uid + } namespace = var.namespace kind = "persistentvolumeclaim" } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/variables.tf index 88ddf6f32f..fe7294b53b 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-mount/variables.tf @@ -9,12 +9,17 @@ variable "namespace" { } variable "nfs_capacity" { - description = "Capacity of NFS server mount" - type = string - default = "10Gi" + description = "Capacity of NFS server mount in Gi" + type = number + default = 10 } variable "nfs_endpoint" { description = "Endpoint of nfs server" type = string } + +variable "nfs-pvc-name" { + description = "Name of the persistent volume claim" + type = string +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/main.tf index 1032d15ad7..95120343e2 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/main.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/main.tf @@ -8,7 +8,7 @@ resource "kubernetes_persistent_volume_claim" "main" { access_modes = ["ReadWriteOnce"] resources { requests = { - storage = var.nfs_capacity + storage = "${var.nfs_capacity}Gi" } } } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/variables.tf index 21e41a7e90..63025465b3 
100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/nfs-server/variables.tf @@ -9,9 +9,9 @@ variable "namespace" { } variable "nfs_capacity" { - description = "Capacity of NFS server deployment" - type = string - default = "10Gi" + description = "Capacity of NFS server deployment in Gi" + type = number + default = 10 } variable "node-group" { diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py index 6ed6232ba8..ad9b79843a 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/config/conda_store_config.py @@ -1,6 +1,12 @@ +import dataclasses import json import logging +import re import tempfile +import typing +import urllib +import urllib.parse +import urllib.request from pathlib import Path import requests @@ -17,7 +23,6 @@ def conda_store_config(path="/var/lib/conda-store/config.json"): config = conda_store_config() - # ================================== # conda-store settings # ================================== @@ -49,11 +54,15 @@ def conda_store_config(path="/var/lib/conda-store/config.json"): "conda-forge", "https://repo.anaconda.com/pkgs/main", ] +c.RBACAuthorizationBackend.role_mappings_version = 2 # ================================== # server settings # ================================== c.CondaStoreServer.log_level = logging.INFO +c.CondaStoreServer.log_format = ( + "%(asctime)s %(levelname)9s %(name)s:%(lineno)4s: %(message)s" +) c.CondaStoreServer.enable_ui = True c.CondaStoreServer.enable_api = True c.CondaStoreServer.enable_registry = True @@ -64,7 +73,6 @@ def 
conda_store_config(path="/var/lib/conda-store/config.json"): # This MUST start with `/` c.CondaStoreServer.url_prefix = "/conda-store" - # ================================== # auth settings # ================================== @@ -83,10 +91,280 @@ def conda_store_config(path="/var/lib/conda-store/config.json"): c.GenericOAuthAuthentication.user_data_key = "preferred_username" c.GenericOAuthAuthentication.tls_verify = False +CONDA_STORE_ROLE_PERMISSIONS_ORDER = ["viewer", "developer", "admin"] + + +@dataclasses.dataclass +class CondaStoreNamespaceRole: + namespace: str + role: str + + +@dataclasses.dataclass +class KeyCloakCondaStoreRoleScopes: + scopes: str + log: logging.Logger + + def _validate_role(self, role): + valid = role in CONDA_STORE_ROLE_PERMISSIONS_ORDER + self.log.info(f"role: {role} is {'valid' if valid else 'invalid'}") + return valid + + def parse_role_and_namespace( + self, text + ) -> typing.Optional[CondaStoreNamespaceRole]: + # The regex pattern + pattern = r"^(\w+)!namespace=([^!]+)$" + + # Perform the regex search + match = re.search(pattern, text) + + # Extract the permission and namespace if there is a match + if match and self._validate_role(match.group(1)): + return CondaStoreNamespaceRole( + namespace=match.group(2), role=match.group(1) + ) + else: + return None + + def parse_scope(self) -> typing.List[CondaStoreNamespaceRole]: + """Parsed scopes from keycloak role's attribute and returns a list of role/namespace + if scopes' syntax is valid otherwise return [] + + Example: + Given scopes as "viewer!namespace=scipy,admin!namespace=pycon", the function will + return [{"role": "viewer", "namespace": "scipy"}, {"role": "admin", "namespace": "pycon"}] + """ + if not self.scopes: + self.log.info(f"No scope found: {self.scopes}, skipping role") + return [] + scope_list = self.scopes.split(",") + parsed_scopes = [] + self.log.info(f"Scopes to parse: {scope_list}") + for scope_text in scope_list: + parsed_scope = 
self.parse_role_and_namespace(scope_text) + parsed_scopes.append(parsed_scope) + if not parsed_scope: + self.log.info(f"Unable to parse: {scope_text}, skipping keycloak role") + return [] + return parsed_scopes + class KeyCloakAuthentication(GenericOAuthAuthentication): + conda_store_api_url = f"https://{config['external-url']}/conda-store/api/v1" + access_token_url = config["token_url_internal"] + realm_api_url = config["realm_api_url_internal"] + service_account_token = config["service-tokens-mapping"][ + "conda-store-service-account" + ] + + def _get_conda_store_client_id(self, token: str) -> str: + # Get the clients list to find the "id" of "conda-store" client. + self.log.info("Getting conda store client id") + clients_data = self._fetch_api(endpoint="clients/", token=token) + conda_store_clients = [ + client for client in clients_data if client["clientId"] == "conda_store" + ] + self.log.info(f"conda store clients: {conda_store_clients}") + assert len(conda_store_clients) == 1 + conda_store_client_id = conda_store_clients[0]["id"] + return conda_store_client_id + + async def _delete_conda_store_roles(self, request, namespace: str, username: str): + self.log.info( + f"Delete all conda-store roles on namespace: {namespace} for user: {username}" + ) + conda_store = await get_conda_store(request) + with conda_store.session_factory() as db: + api.delete_namespace_role(db, namespace, other=username) + db.commit() + + async def _create_conda_store_role( + self, request, namespace_role: CondaStoreNamespaceRole, username: str + ): + self.log.info( + f"Creating conda-store roles on namespace: {namespace_role.namespace} for user: {username}" + ) + conda_store = await get_conda_store(request) + with conda_store.session_factory() as db: + api.create_namespace_role( + db, namespace_role.namespace, username, namespace_role.role + ) + db.commit() + + def _get_keycloak_token(self) -> str: + body = urllib.parse.urlencode( + { + "client_id": self.client_id, + "client_secret": 
self.client_secret, + "grant_type": "client_credentials", + } + ) + self.log.info(f"Getting token from access token url: {self.access_token_url}") + req = urllib.request.Request(self.access_token_url, data=body.encode()) + response = urllib.request.urlopen(req) + data = json.loads(response.read()) + return data["access_token"] # type: ignore[no-any-return] + + def _fetch_api(self, endpoint: str, token: str): + request_url = f"{self.realm_api_url}/{endpoint}" + req = urllib.request.Request( + request_url, + method="GET", + headers={"Authorization": f"Bearer {token}"}, + ) + self.log.info(f"Making request to: {request_url}") + with urllib.request.urlopen(req) as response: + data = json.loads(response.read()) + return data + + async def _remove_current_bindings(self, request, username): + """Remove current roles for the user to make sure only the roles defined in + keycloak are applied: + - to avoid inconsistency in user roles + - single source of truth + - roles that are added in keycloak and then later removed from keycloak are actually removed from conda-store. 
+ """ + entity_bindings = self._get_current_entity_bindings(username) + self.log.info("Remove current role bindings for the user") + for entity, role in entity_bindings.items(): + if entity not in {"default/*", "filesystem/*"}: + namespace = entity.split("/")[0] + self.log.info( + f"Removing current role {role} on namespace {namespace} " + f"for user {username}" + ) + await self._delete_conda_store_roles(request, namespace, username) + + async def _apply_roles_from_keycloak(self, request, user_data): + token = self._get_keycloak_token() + conda_store_client_id = self._get_conda_store_client_id(token) + conda_store_client_roles = self._get_conda_store_client_roles_for_user( + user_data["sub"], conda_store_client_id, token + ) + await self._remove_current_bindings(request, user_data["preferred_username"]) + await self._apply_conda_store_roles_from_keycloak( + request, conda_store_client_roles, user_data["preferred_username"] + ) + + def _filter_duplicate_namespace_roles_with_max_permissions( + self, namespace_roles: typing.List[CondaStoreNamespaceRole] + ): + """Filter duplicate roles in keycloak such that to apply only the one with the highest + permissions. + + Example: + role 1: namespace: foo, role: viewer + role 2: namespace: foo, role: admin + We need to apply only the role 2 as that one has higher permissions. 
+ """ + self.log.info("Filtering duplicate roles for same namespace") + namespace_role_mapping: typing.Dict[str:CondaStoreNamespaceRole] = {} + for namespace_role in namespace_roles: + namespace = namespace_role.namespace + new_role = namespace_role.role + + existing_role: CondaStoreNamespaceRole = namespace_role_mapping.get( + namespace + ) + if not existing_role: + # Add if not already added + namespace_role_mapping[namespace] = namespace_role + else: + # Only add if the permissions of this role is higher than existing + new_role_priority = CONDA_STORE_ROLE_PERMISSIONS_ORDER.index(new_role) + existing_role_priority = CONDA_STORE_ROLE_PERMISSIONS_ORDER.index( + existing_role.role + ) + if new_role_priority > existing_role_priority: + namespace_role_mapping[namespace] = new_role + return list(namespace_role_mapping.values()) + + def _get_permissions_from_keycloak_role( + self, keycloak_role + ) -> typing.List[CondaStoreNamespaceRole]: + self.log.info(f"Getting permissions from keycloak role: {keycloak_role}") + role_attributes = keycloak_role["attributes"] + # scopes returns a list with a value say ["viewer!namespace=pycon,developer!namespace=scipy"] + scopes = role_attributes.get("scopes", [""])[0] + k_cstore_scopes = KeyCloakCondaStoreRoleScopes(scopes=scopes, log=self.log) + return k_cstore_scopes.parse_scope() + + async def _apply_conda_store_roles_from_keycloak( + self, request, conda_store_client_roles, username + ): + self.log.info( + f"Apply conda store roles from keycloak roles: {conda_store_client_roles}, user: {username}" + ) + role_permissions: typing.List[CondaStoreNamespaceRole] = [] + for conda_store_client_role in conda_store_client_roles: + role_permissions += self._get_permissions_from_keycloak_role( + conda_store_client_role + ) + + self.log.info("Filtering duplicate namespace role for max permissions") + filtered_namespace_role: typing.List[CondaStoreNamespaceRole] = ( + self._filter_duplicate_namespace_roles_with_max_permissions( + 
role_permissions + ) + ) + self.log.info(f"Final role permissions to apply: {filtered_namespace_role}") + for namespace_role in filtered_namespace_role: + if namespace_role.namespace.lower() == username.lower(): + self.log.info("Role for given user's namespace, skipping") + continue + try: + await self._delete_conda_store_roles( + request, namespace_role.namespace, username + ) + await self._create_conda_store_role(request, namespace_role, username) + except ValueError as e: + self.log.error( + f"Failed to add permissions for namespace: {namespace_role.namespace} to user: {username}" + ) + self.log.exception(e) + + def _get_keycloak_conda_store_roles_with_attributes( + self, roles: dict, client_id: str, token: str + ): + """This fetches all roles by id to fetch their attributes.""" + roles_rich = [] + for role in roles: + # If this takes too much time, which isn't the case right now, we can + # also do multi-threaded requests + role_rich = self._fetch_api( + endpoint=f"roles-by-id/{role['id']}?client={client_id}", token=token + ) + roles_rich.append(role_rich) + return roles_rich + + def _get_conda_store_client_roles_for_user( + self, user_id, conda_store_client_id, token + ): + """Get roles for the client named 'conda-store' for the given user_id.""" + self.log.info( + f"Get conda store client roles for user: {user_id}, conda_store_client_id: {conda_store_client_id}" + ) + user_roles = self._fetch_api( + endpoint=f"users/{user_id}/role-mappings/clients/{conda_store_client_id}/composite", + token=token, + ) + client_roles_rich = self._get_keycloak_conda_store_roles_with_attributes( + user_roles, client_id=conda_store_client_id, token=token + ) + self.log.info(f"conda store client roles: {client_roles_rich}") + return client_roles_rich + + def _get_current_entity_bindings(self, username): + entity = schema.AuthenticationToken( + primary_namespace=username, role_bindings={} + ) + self.log.info(f"entity: {entity}") + entity_bindings = 
self.authorization.get_entity_bindings(entity) + self.log.info(f"current entity_bindings: {entity_bindings}") + return entity_bindings + async def authenticate(self, request): - # 1. using the callback_url code and state in request oauth_access_token = self._get_oauth_token(request) if oauth_access_token is None: return None # authentication failed @@ -98,9 +376,14 @@ async def authenticate(self, request): ) response.raise_for_status() user_data = response.json() - username = user_data["preferred_username"] + try: + await self._apply_roles_from_keycloak(request, user_data=user_data) + except Exception as e: + self.log.error("Adding roles from keycloak failed") + self.log.exception(e) + # superadmin gets access to everything if "conda_store_superadmin" in user_data.get("roles", []): return schema.AuthenticationToken( @@ -119,7 +402,9 @@ async def authenticate(self, request): if role in role_mappings } default_namespace = config["default-namespace"] + self.log.info(f"default_namespace: {default_namespace}") namespaces = {username, "global", default_namespace} + self.log.info(f"namespaces: {namespaces}") role_bindings = { f"{username}/*": {"admin"}, f"{default_namespace}/*": {"viewer"}, diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/output.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/output.tf index 571e75ef07..a00e0d2c80 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/output.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/output.tf @@ -17,3 +17,8 @@ output "service-tokens" { description = "Service tokens for conda-store" value = { for k, _ in var.services : k => base64encode(random_password.conda_store_service_token[k].result) } } + +output "pvc" { + description = "Shared PVC name for conda-store" + value = local.shared-pvc +} diff --git 
a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf index ab9edd87e8..8a29bc2d41 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/server.tf @@ -13,22 +13,28 @@ resource "kubernetes_secret" "conda-store-secret" { data = { "config.json" = jsonencode({ - external-url = var.external-url - minio-username = module.minio.root_username - minio-password = module.minio.root_password - minio-service = module.minio.service - redis-password = module.redis.root_password - redis-service = module.redis.service - postgres-username = module.postgresql.root_username - postgres-password = module.postgresql.root_password - postgres-service = module.postgresql.service - openid-config = module.conda-store-openid-client.config - extra-settings = var.extra-settings - extra-config = var.extra-config - default-namespace = var.default-namespace-name + external-url = var.external-url + minio-username = module.minio.root_username + minio-password = module.minio.root_password + minio-service = module.minio.service + redis-password = module.redis.root_password + redis-service = module.redis.service + postgres-username = module.postgresql.root_username + postgres-password = module.postgresql.root_password + postgres-service = module.postgresql.service + openid-config = module.conda-store-openid-client.config + extra-settings = var.extra-settings + extra-config = var.extra-config + default-namespace = var.default-namespace-name + token_url_internal = "http://keycloak-http.${var.namespace}.svc/auth/realms/${var.realm_id}/protocol/openid-connect/token" + realm_api_url_internal = "http://keycloak-http.${var.namespace}.svc/auth/admin/realms/${var.realm_id}" service-tokens = { for service, value in 
var.services : base64encode(random_password.conda_store_service_token[service].result) => value } + # So that the mapping can be used in conda-store config itself + service-tokens-mapping = { + for service, _ in var.services : service => base64encode(random_password.conda_store_service_token[service].result) + } extra-settings = var.extra-settings extra-config = var.extra-config }) @@ -63,6 +69,10 @@ module "conda-store-openid-client" { callback-url-paths = [ "https://${var.external-url}/conda-store/oauth_callback" ] + service-accounts-enabled = true + service-account-roles = [ + "view-realm", "view-users", "view-clients" + ] } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/shared-pvc.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/shared-pvc.tf new file mode 100644 index 0000000000..cb6809d265 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/shared-pvc.tf @@ -0,0 +1,36 @@ +module "conda-store-nfs-mount" { + count = var.conda-store-fs == "nfs" ? 1 : 0 + source = "../../../../modules/kubernetes/nfs-mount" + + name = "conda-store" + namespace = var.namespace + nfs_capacity = var.nfs_capacity + nfs_endpoint = kubernetes_service.nfs.spec.0.cluster_ip + nfs-pvc-name = local.conda-store-pvc-name + + depends_on = [ + kubernetes_deployment.worker, + ] +} + + +locals { + conda-store-pvc-name = "conda-store-${var.namespace}-share" + new-pvc-name = "nebari-conda-store-storage" + create-pvc = var.conda-store-fs == "nfs" + enable-nfs-server-worker = var.conda-store-fs == "nfs" + pvc-name = var.conda-store-fs == "nfs" ? local.new-pvc-name : local.conda-store-pvc-name + shared-pvc = var.conda-store-fs == "nfs" ? module.conda-store-nfs-mount[0].persistent_volume_claim.pvc : module.conda-store-cephfs-mount[0].persistent_volume_claim.pvc +} + + + +module "conda-store-cephfs-mount" { + count = var.conda-store-fs == "cephfs" ? 
1 : 0 + source = "../../../../modules/kubernetes/cephfs-mount" + + name = "conda-store" + namespace = var.namespace + fs_capacity = var.nfs_capacity # conda-store-filesystem-storage + ceph-pvc-name = local.conda-store-pvc-name +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf index fd5ff0fa2f..d90e1650de 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/variables.tf @@ -76,3 +76,13 @@ variable "services" { description = "Map of services tokens and scopes for conda-store" type = map(any) } + +variable "conda-store-fs" { + type = string + description = "Use NFS or Ceph" + + validation { + condition = contains(["cephfs", "nfs"], var.conda-store-fs) + error_message = "Allowed values for input_parameter are \"cephfs\", or \"nfs\"." + } +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf index c3e725dbea..9410a4cc65 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/conda-store/worker.tf @@ -28,6 +28,8 @@ resource "kubernetes_service" "nfs" { resource "kubernetes_persistent_volume_claim" "main" { + count = local.create-pvc ? 
1 : 0 + metadata { name = "${var.name}-conda-store-storage" namespace = var.namespace @@ -37,7 +39,7 @@ resource "kubernetes_persistent_volume_claim" "main" { access_modes = ["ReadWriteOnce"] resources { requests = { - storage = var.nfs_capacity + storage = "${var.nfs_capacity}Gi" } } } @@ -134,32 +136,35 @@ resource "kubernetes_deployment" "worker" { } } - container { - name = "nfs-server" - image = "gcr.io/google_containers/volume-nfs:0.8" + dynamic "container" { + for_each = local.enable-nfs-server-worker ? [1] : [] + content { + name = "nfs-server" + image = "gcr.io/google_containers/volume-nfs:0.8" - port { - name = "nfs" - container_port = 2049 - } + port { + name = "nfs" + container_port = 2049 + } - port { - name = "mountd" - container_port = 20048 - } + port { + name = "mountd" + container_port = 20048 + } - port { - name = "rpcbind" - container_port = 111 - } + port { + name = "rpcbind" + container_port = 111 + } - security_context { - privileged = true - } + security_context { + privileged = true + } - volume_mount { - mount_path = "/exports" - name = "storage" + volume_mount { + mount_path = "/exports" + name = "storage" + } } } @@ -191,7 +196,7 @@ resource "kubernetes_deployment" "worker" { # directly reference the pvc may no longer be issue in # future # claim_name = kubernetes_persistent_volume_claim.main.metadata.0.name - claim_name = "${var.name}-conda-store-storage" + claim_name = local.pvc-name } } security_context { @@ -201,4 +206,19 @@ resource "kubernetes_deployment" "worker" { } } } + depends_on = [ + module.conda-store-cephfs-mount + ] + + lifecycle { + replace_triggered_by = [ + null_resource.pvc + ] + } +} + +resource "null_resource" "pvc" { + triggers = { + pvc = var.conda-store-fs + } } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/controler.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/controller.tf similarity index 100% rename from 
src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/controler.tf rename to src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/controller.tf diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py index 2219d14e56..c58e3aa90d 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/files/gateway_config.py @@ -129,7 +129,7 @@ def base_node_group(options): default_node_group if worker_node_group is None else worker_node_group ) - # check `schduler_extra_pod_config` first + # check `scheduler_extra_pod_config` first scheduler_node_group = ( config["profiles"][options.profile] .get("scheduler_extra_pod_config", {}) diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf index 62265b350b..4c1d638a4c 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/gateway.tf @@ -18,7 +18,7 @@ resource "kubernetes_secret" "gateway" { cluster-image = var.cluster-image profiles = var.profiles default-conda-store-namespace = var.default-conda-store-namespace - conda-store-pvc = var.conda-store-pvc + conda-store-pvc = var.conda-store-pvc.name conda-store-mount = var.conda-store-mount worker-node-group = var.worker-node-group conda-store-api-token = var.conda-store-api-token @@ -170,7 +170,7 @@ resource "kubernetes_deployment" "gateway" { volume { name = 
"conda-store" persistent_volume_claim { - claim_name = var.conda-store-pvc + claim_name = var.conda-store-pvc.name } } @@ -245,4 +245,14 @@ resource "kubernetes_deployment" "gateway" { } } } + + lifecycle { + replace_triggered_by = [null_resource.conda-store-pvc] + } +} + +resource "null_resource" "conda-store-pvc" { + triggers = { + pvc = var.conda-store-pvc.id + } } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf index 074e1214d0..121405a322 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/dask-gateway/variables.tf @@ -177,7 +177,10 @@ variable "profiles" { variable "conda-store-pvc" { description = "Name for persistent volume claim to use for conda-store directory" - type = string + type = object({ + name = string + id = string + }) } variable "conda-store-mount" { diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/sftp.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/sftp.tf index 87f70f9a58..4f53567e4c 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/sftp.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/sftp.tf @@ -76,7 +76,7 @@ resource "kubernetes_deployment" "jupyterhub-sftp" { volume { name = "home" persistent_volume_claim { - claim_name = var.persistent_volume_claim + claim_name = var.persistent_volume_claim.name } } @@ -131,4 +131,16 @@ resource "kubernetes_deployment" "jupyterhub-sftp" { } } } + lifecycle { + replace_triggered_by = [ + null_resource.pvc, + ] + } +} + +# hack to force the deployment to update when the pvc changes +resource 
"null_resource" "pvc" { + triggers = { + pvc = var.persistent_volume_claim.id + } } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/variables.tf index 430eb14888..d90b1ddad9 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub-ssh/variables.tf @@ -30,7 +30,7 @@ variable "jupyterhub-ssh-image" { }) default = { name = "quay.io/jupyterhub-ssh/ssh" - tag = "0.0.1-0.dev.git.136.ha610981" + tag = "0.0.1-0.dev.git.149.he5107a4" } } @@ -48,5 +48,8 @@ variable "jupyterhub-sftp-image" { variable "persistent_volume_claim" { description = "name of persistent volume claim to mount" - type = string + type = object({ + name = string + id = string + }) } diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf index 4b8f9145b9..bfee219e9e 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/configmaps.tf @@ -47,6 +47,13 @@ resource "local_file" "jupyter_jupyterlab_pioneer_config_py" { } } +resource "local_sensitive_file" "jupyter_gallery_config_json" { + content = jsonencode({ + "GalleryManager" = var.jupyterlab-gallery-settings + }) + filename = "${path.module}/files/jupyter/jupyter_gallery_config.json" +} + resource "local_file" "overrides_json" { content = jsonencode(local.jupyterlab-overrides-json-object) @@ -70,7 +77,8 @@ resource "kubernetes_config_map" "etc-ipython" { locals { etc-jupyter-config-data = merge( { - "jupyter_server_config.py" = 
local_file.jupyter_server_config_py.content, + "jupyter_server_config.py" = local_file.jupyter_server_config_py.content, + "jupyter_gallery_config.json" = local_sensitive_file.jupyter_gallery_config_json.content, }, var.jupyterlab-pioneer-enabled ? { # quotes are must here, as terraform would otherwise think py is a property of @@ -89,7 +97,8 @@ locals { resource "kubernetes_config_map" "etc-jupyter" { depends_on = [ local_file.jupyter_server_config_py, - local_file.jupyter_jupyterlab_pioneer_config_py + local_file.jupyter_jupyterlab_pioneer_config_py, + local_sensitive_file.jupyter_gallery_config_json ] metadata { diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl index d5e089dfa3..f8206a3ec9 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyter/jupyter_server_config.py.tpl @@ -4,6 +4,13 @@ # Extra config available at: # https://zero-to-jupyterhub.readthedocs.io/en/1.x/jupyterhub/customizing/user-management.html#culling-user-pods + +# Refuse to serve content from handlers missing authentication guards, unless +# the handler is explicitly allow-listed with `@allow_unauthenticated`; this +# prevents accidental exposure of information by extensions installed in the +# single-user server when their handlers are missing authentication guards. 
+c.ServerApp.allow_unauthenticated_access = False + # Enable Show Hidden Files menu option in View menu c.ContentsManager.allow_hidden = True c.FileContentsManager.allow_hidden = True diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py index ea9511a4cc..aa2153dc29 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/02-spawner.py @@ -1,3 +1,5 @@ +import inspect + import kubernetes.client.models from tornado import gen @@ -80,9 +82,14 @@ def service_for_jhub_apps(name, url): service_for_jhub_apps(name="Users", url="/auth/admin/nebari/console/"), service_for_jhub_apps(name="Environments", url="/conda-store"), service_for_jhub_apps(name="Monitoring", url="/monitoring"), - service_for_jhub_apps(name="VSCode", url="/user/[USER]/vscode"), ] ) c.JupyterHub.template_paths = theme_template_paths - c = install_jhub_apps(c, spawner_to_subclass=KubeSpawner) + + kwargs = {} + jhub_apps_signature = inspect.signature(install_jhub_apps) + if "oauth_no_confirm" in jhub_apps_signature.parameters: + kwargs["oauth_no_confirm"] = True + + c = install_jhub_apps(c, spawner_to_subclass=KubeSpawner, **kwargs) diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py index 8ccac70ca0..b298ae5ae1 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py +++ 
b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/03-profiles.py @@ -48,10 +48,16 @@ def base_profile_home_mounts(username): } MKDIR_OWN_DIRECTORY = ( - "mkdir -p /mnt/{path} && chmod 777 /mnt/{path} && cp -r /etc/skel/. /mnt/{path}" + "mkdir -p /mnt/{path} && chmod 777 /mnt/{path} && " + # Copy skel files/folders not starting with '..' to user home directory. + # Filtering out ..* removes some unneeded folders (k8s configmap mount implementation details). + "find /etc/skel/. -maxdepth 1 -not -name '.' -not -name '..*' -exec " + "cp -rL {escaped_brackets} /mnt/{path} \;" ) command = MKDIR_OWN_DIRECTORY.format( - path=pvc_home_mount_path.format(username=username) + # have to escape the brackets since this string will be formatted later by KubeSpawner + escaped_brackets="{{}}", + path=pvc_home_mount_path.format(username=username), ) init_containers = [ { @@ -76,11 +82,11 @@ def base_profile_home_mounts(username): } -def base_profile_shared_mounts(groups): +def base_profile_shared_mounts(groups_to_volume_mount): """Configure the group directory mounts for user. - Ensure that {shared}/{group} directory exists and user has - permissions to read/write/execute. Kubernetes does not allow the + Ensure that {shared}/{group} directory exists based on the scope availability + and if user has permissions to read/write/execute. Kubernetes does not allow the same pvc to be a volume thus we must check that the home and share pvc are not the same for some operation. 
@@ -97,40 +103,42 @@ def base_profile_shared_mounts(groups): {"name": "shared", "persistentVolumeClaim": {"claimName": shared_pvc_name}} ) - extra_container_config = { - "volumeMounts": [ - { - "mountPath": pod_shared_mount_path.format(group=group), - "name": "shared" if home_pvc_name != shared_pvc_name else "home", - "subPath": pvc_shared_mount_path.format(group=group), - } - for group in groups - ] - } + extra_container_config = {"volumeMounts": []} MKDIR_OWN_DIRECTORY = "mkdir -p /mnt/{path} && chmod 777 /mnt/{path}" command = " && ".join( [ MKDIR_OWN_DIRECTORY.format(path=pvc_shared_mount_path.format(group=group)) - for group in groups + for group in groups_to_volume_mount ] ) + init_containers = [ { "name": "initialize-shared-mounts", "image": "busybox:1.31", "command": ["sh", "-c", command], "securityContext": {"runAsUser": 0}, - "volumeMounts": [ - { - "mountPath": f"/mnt/{pvc_shared_mount_path.format(group=group)}", - "name": "shared" if home_pvc_name != shared_pvc_name else "home", - "subPath": pvc_shared_mount_path.format(group=group), - } - for group in groups - ], + "volumeMounts": [], } ] + + for group in groups_to_volume_mount: + extra_container_config["volumeMounts"].append( + { + "mountPath": pod_shared_mount_path.format(group=group), + "name": "shared" if home_pvc_name != shared_pvc_name else "home", + "subPath": pvc_shared_mount_path.format(group=group), + } + ) + init_containers[0]["volumeMounts"].append( + { + "mountPath": f"/mnt/{pvc_shared_mount_path.format(group=group)}", + "name": "shared" if home_pvc_name != shared_pvc_name else "home", + "subPath": pvc_shared_mount_path.format(group=group), + } + ) + return { "extra_pod_config": extra_pod_config, "extra_container_config": extra_container_config, @@ -469,7 +477,9 @@ def profile_conda_store_viewer_token(): } -def render_profile(profile, username, groups, keycloak_profilenames): +def render_profile( + profile, username, groups, keycloak_profilenames, groups_to_volume_mount +): """Render each 
profile for user. If profile is not available for given username, groups returns @@ -507,7 +517,7 @@ def render_profile(profile, username, groups, keycloak_profilenames): deep_merge, [ base_profile_home_mounts(username), - base_profile_shared_mounts(groups), + base_profile_shared_mounts(groups_to_volume_mount), profile_conda_store_mounts(username, groups), base_profile_extra_mounts(), configure_user(username, groups), @@ -546,11 +556,15 @@ def render_profiles(spawner): auth_state = yield spawner.user.get_auth_state() username = auth_state["oauth_user"]["preferred_username"] + # only return the lowest level group name # e.g. /projects/myproj -> myproj # and /developers -> developers groups = [Path(group).name for group in auth_state["oauth_user"]["groups"]] - spawner.log.info(f"user info: {username} {groups}") + groups_with_permission_to_mount = [ + Path(group).name + for group in auth_state.get("groups_with_permission_to_mount", []) + ] keycloak_profilenames = auth_state["oauth_user"].get("jupyterlab_profiles", []) @@ -560,7 +574,13 @@ def render_profiles(spawner): filter( None, [ - render_profile(p, username, groups, keycloak_profilenames) + render_profile( + p, + username, + groups, + keycloak_profilenames, + groups_with_permission_to_mount, + ) for p in profile_list ], ) diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/04-auth.py b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/04-auth.py index bc6fb6a721..2694b2a34e 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/04-auth.py +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/jupyterhub/04-auth.py @@ -1,3 +1,4 @@ +import asyncio import json import os import time @@ -55,13 +56,27 @@ async def update_auth_model(self, auth_model): user_roles_rich = await 
self._get_roles_with_attributes( roles=user_roles, client_id=jupyterhub_client_id, token=token ) + + # Include which groups have permission to mount shared directories (used by + # profiles.py) + auth_model["auth_state"]["groups_with_permission_to_mount"] = ( + await self.get_client_groups_with_mount_permissions( + user_groups=auth_model["auth_state"]["oauth_user"]["groups"], + user_roles=user_roles_rich, + client_id=jupyterhub_client_id, + token=token, + ) + ) + keycloak_api_call_time_taken = time.time() - keycloak_api_call_start user_roles_rich_names = {role["name"] for role in user_roles_rich} + user_roles_non_jhub_client = [ {"name": role} for role in user_roles_from_claims if role in (user_roles_from_claims - user_roles_rich_names) ] + auth_model["roles"] = [ { "name": role["name"], @@ -70,12 +85,16 @@ async def update_auth_model(self, auth_model): } for role in [*user_roles_rich, *user_roles_non_jhub_client] ] + # note: because the roles check is comprehensive, we need to re-add the admin and user roles if auth_model["admin"]: auth_model["roles"].append({"name": "admin"}) + if await self.check_allowed(auth_model["name"], auth_model): auth_model["roles"].append({"name": "user"}) + execution_time = time.time() - start + self.log.info( f"Auth model update complete, time taken: {execution_time}s " f"time taken for keycloak api call: {keycloak_api_call_time_taken}s " @@ -116,6 +135,7 @@ async def load_managed_roles(self): client_roles_rich = await self._get_jupyterhub_client_roles( jupyterhub_client_id=jupyterhub_client_id, token=token ) + # Includes roles like "default-roles-nebari", "offline_access", "uma_authorization" realm_roles = await self._fetch_api(endpoint="roles", token=token) roles = { @@ -126,38 +146,117 @@ } for role in [*realm_roles, *client_roles_rich] } + # we could use either `name` (e.g. "developer") or `path` ("/developer"); # since the default claim key returns `path`, it seems preferable.
- group_name_key = "path" for realm_role in realm_roles: role_name = realm_role["name"] role = roles[role_name] # fetch role assignments to groups - groups = await self._fetch_api(f"roles/{role_name}/groups", token=token) - role["groups"] = [group[group_name_key] for group in groups] - # fetch role assignments to users - users = await self._fetch_api(f"roles/{role_name}/users", token=token) - role["users"] = [user["username"] for user in users] + role.update( + await self._get_users_and_groups_for_role( + role_name, + token=token, + ) + ) + for client_role in client_roles_rich: role_name = client_role["name"] role = roles[role_name] # fetch role assignments to groups - groups = await self._fetch_api( - f"clients/{jupyterhub_client_id}/roles/{role_name}/groups", token=token - ) - role["groups"] = [group[group_name_key] for group in groups] - # fetch role assignments to users - users = await self._fetch_api( - f"clients/{jupyterhub_client_id}/roles/{role_name}/users", token=token + role.update( + await self._get_users_and_groups_for_role( + role_name, + token=token, + client_id=jupyterhub_client_id, + ) ) - role["users"] = [user["username"] for user in users] return list(roles.values()) + async def get_client_groups_with_mount_permissions( + self, user_groups, user_roles, client_id, token + ): + """ + Asynchronously retrieves the list of client groups with mount permissions + that the user belongs to. 
+ """ + + roles_with_permission = [] + groups_with_permission_to_mount = set() + + # Filter roles with the shared-directory component and scope + for role in user_roles: + attributes = role.get("attributes", {}) + + role_component = attributes.get("component", [None])[0] + role_scopes = attributes.get("scopes", [None])[0] + + if ( + role_component == "shared-directory" + and role_scopes == "write:shared-mount" + ): + role_name = role.get("name") + roles_with_permission.append(role_name) + + # Fetch groups for all relevant roles concurrently + group_fetch_tasks = [ + self._fetch_api( + endpoint=f"clients/{client_id}/roles/{role_name}/groups", + token=token, + ) + for role_name in roles_with_permission + ] + + all_role_groups = await asyncio.gather(*group_fetch_tasks) + + # Collect group names with permissions + for role_groups in all_role_groups: + groups_with_permission_to_mount |= set( + [group["path"] for group in role_groups] + ) + + return list(groups_with_permission_to_mount & set(user_groups)) + + async def _get_users_and_groups_for_role( + self, role_name, token, client_id=None, group_name_key="path" + ): + """ + Asynchronously fetches and maps groups and users to a specified role. + + Returns: + dict: A dictionary with groups (path or name) and users mapped to the role. 
+ { + "groups": ["/group1", "/group2"], + "users": ["user1", "user2"], + }, + """ + # Prepare endpoints + group_endpoint = f"roles/{role_name}/groups" + user_endpoint = f"roles/{role_name}/users" + + if client_id: + group_endpoint = f"clients/{client_id}/roles/{role_name}/groups" + user_endpoint = f"clients/{client_id}/roles/{role_name}/users" + + # fetch role assignments to groups (Fetch data concurrently) + groups, users = await asyncio.gather( + *[ + self._fetch_api(endpoint=group_endpoint, token=token), + self._fetch_api(endpoint=user_endpoint, token=token), + ] + ) + + # Process results + return { + "groups": [group[group_name_key] for group in groups], + "users": [user["username"] for user in users], + } + def _get_scope_from_role(self, role): """Return scopes from role if the component is jupyterhub""" role_scopes = role.get("attributes", {}).get("scopes", []) - component = role.get("attributes", {}).get("component") + component = role.get("attributes", {}).get("component", []) # Attributes are returned as a single-element array, unless `##` delimiter is used in Keycloak # See this: https://stackoverflow.com/questions/68954733/keycloak-client-role-attribute-array if component == ["jupyterhub"] and role_scopes: @@ -179,11 +278,11 @@ def validate_scopes(self, role_scopes): return [] async def _get_roles_with_attributes(self, roles: dict, client_id: str, token: str): - """This fetches all roles by id to fetch there attributes.""" + """This fetches all roles by id to fetch their attributes.""" roles_rich = [] for role in roles: # If this takes too much time, which isn't the case right now, we can - # also do multi-threaded requests + # also do multithreaded requests role_rich = await self._fetch_api( endpoint=f"roles-by-id/{role['id']}?client={client_id}", token=token ) diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/skel/.bashrc 
b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/skel/.bashrc index a427972228..58a612cc46 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/skel/.bashrc +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/files/skel/.bashrc @@ -4,6 +4,10 @@ # ~/.bashrc: executed by bash(1) for non-login shells. # see /usr/share/doc/bash/examples/startup-files (in the package bash-doc) # for examples +# +# Regular Nebari users do not have write permissions to modify the ~/.bashrc file. +# Users can add their own customizations to the ~/.bash_profile file. +# More details can be found in the docs https://www.nebari.dev/docs/faq#can-i-modify-the-bashrc-file-on-nebari. # If not running interactively, don't do anything case $- in diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf index fe7716cf88..06cd4d6dd1 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/main.tf @@ -57,7 +57,7 @@ resource "helm_release" "jupyterhub" { repository = "https://jupyterhub.github.io/helm-chart/" chart = "jupyterhub" - version = "4.0.0-0.dev.git.6619.hd126b1bd" + version = "4.0.0-0.dev.git.6707.h109668fd" values = concat([ file("${path.module}/values.yaml"), @@ -69,8 +69,8 @@ resource "helm_release" "jupyterhub" { theme = var.theme profiles = var.profiles argo-workflows-enabled = var.argo-workflows-enabled - home-pvc = var.home-pvc - shared-pvc = var.shared-pvc + home-pvc = var.home-pvc.name + shared-pvc = var.shared-pvc.name conda-store-pvc = var.conda-store-pvc conda-store-mount = var.conda-store-mount default-conda-store-namespace = 
var.default-conda-store-namespace @@ -216,8 +231,25 @@ resource "helm_release" "jupyterhub" { name = "proxy.secretToken" value = random_password.proxy_secret_token.result } + + depends_on = [ + var.home-pvc, + var.shared-pvc, + ] + + lifecycle { + replace_triggered_by = [ + null_resource.home-pvc, + ] + } + } +resource "null_resource" "home-pvc" { + triggers = { + home-pvc = var.home-pvc.id + } +} resource "kubernetes_manifest" "jupyterhub" { manifest = { @@ -279,6 +296,42 @@ module "jupyterhub-openid-client" { "developer" = ["jupyterhub_developer", "dask_gateway_developer"] "analyst" = ["jupyterhub_developer"] } + client_roles = [ + { + "name" : "allow-app-sharing-role", + "description" : "Allow app sharing for apps created via JupyterHub App Launcher (jhub-apps)", + "groups" : [], + "attributes" : { + # grants permissions to share server + # grants permissions to read other users' names + # grants permissions to read other groups' names + # The latter two are required for sharing with a group or user + "scopes" : "shares,read:users:name,read:groups:name" + "component" : "jupyterhub" + } + }, + { + "name" : "allow-read-access-to-services-role", + "description" : "Allow read access to services, such that they are visible on the home page e.g. conda-store", + # Adding it to analyst group such that it's applied to every user.
+ "groups" : ["analyst"], + "attributes" : { + # grants permissions to read services + "scopes" : "read:services", + "component" : "jupyterhub" + } + }, + { + "name" : "allow-group-directory-creation-role", + "description" : "Grants a group the ability to manage the creation of its corresponding mounted directory.", + "groups" : ["admin", "analyst", "developer"], + "attributes" : { + # grants permissions to mount group folder to shared dir + "scopes" : "write:shared-mount", + "component" : "shared-directory" + } + }, + ] callback-url-paths = [ "https://${var.external-url}/hub/oauth_callback", var.jupyterhub-logout-redirect-url diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf index 577dedc8ef..41089d391f 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/jupyterhub/variables.tf @@ -48,12 +48,18 @@ variable "user-node-group" { variable "home-pvc" { description = "Name for persistent volume claim to use for home directory uses /home/{username}" - type = string + type = object({ + name = string + id = string + }) } variable "shared-pvc" { description = "Name for persistent volume claim to use for shared directory uses /share/{group}" - type = string + type = object({ + name = string + id = string + }) } variable "conda-store-pvc" { @@ -163,6 +169,26 @@ variable "jupyterlab-default-settings" { type = map(any) } +variable "jupyterlab-gallery-settings" { + description = "Server-side settings for jupyterlab-gallery extension" + type = object({ + title = optional(string) + destination = optional(string) + hide_gallery_without_exhibits = optional(bool) + exhibits = list(object({ + git = string + title = string + homepage = optional(string) + description = 
optional(string) + icon = optional(string) + account = optional(string) + token = optional(string) + branch = optional(string) + depth = optional(number) + })) + }) +} + variable "jupyterlab-pioneer-enabled" { description = "Enable JupyterLab Pioneer for telemetry" type = bool diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/main.tf index 7a2c3e648d..e23aeb13c8 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/main.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/main.tf @@ -99,7 +99,6 @@ resource "keycloak_role" "main" { description = each.key } - data "keycloak_group" "main" { for_each = var.role_mapping @@ -117,3 +116,41 @@ resource "keycloak_group_roles" "group_roles" { exhaustive = false } + +resource "keycloak_role" "default_client_roles" { + for_each = { for role in var.client_roles : role.name => role } + realm_id = var.realm_id + client_id = keycloak_openid_client.main.id + name = each.value.name + description = each.value.description + attributes = each.value.attributes +} + +locals { + group_role_mapping = flatten([ + for role_object in var.client_roles : [ + for group_name in role_object.groups : { + group : group_name + role_name : role_object.name + } + ] + ]) + + client_roles_groups = toset([ + for index, value in local.group_role_mapping : value.group + ]) +} + +data "keycloak_group" "client_role_groups" { + for_each = local.client_roles_groups + realm_id = var.realm_id + name = each.value +} + +resource "keycloak_group_roles" "assign_roles" { + for_each = { for idx, value in local.group_role_mapping : idx => value } + realm_id = var.realm_id + group_id = data.keycloak_group.client_role_groups[each.value.group].id + role_ids = [keycloak_role.default_client_roles[each.value.role_name].id] + 
exhaustive = false +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/variables.tf index b4e709c6a5..7626cc2b93 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/variables.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/keycloak-client/variables.tf @@ -46,3 +46,14 @@ variable "jupyterlab_profiles_mapper" { type = bool default = false } + +variable "client_roles" { + description = "Create roles for the client and assign it to groups" + default = [] + type = list(object({ + name = string + description = string + groups = optional(list(string)) + attributes = map(any) + })) +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/conda_store.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/conda_store.json index 724b130bc0..87d38ade21 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/conda_store.json +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/conda_store.json @@ -112,7 +112,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_queued", @@ -123,7 +123,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_building", @@ -136,7 +136,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_completed", @@ -149,7 +149,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" 
}, "exemplar": true, "expr": "conda_store_build_failed", @@ -165,7 +165,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -216,7 +216,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_environments", @@ -231,7 +231,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -282,7 +282,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_queued", @@ -297,7 +297,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -348,7 +348,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_building", @@ -357,13 +357,13 @@ "refId": "A" } ], - "title": "Buliding", + "title": "Building", "type": "stat" }, { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -414,7 +414,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_completed", @@ -429,7 +429,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -480,7 +480,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_build_failed", @@ -508,7 +508,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -559,7 +559,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": 
"conda_store_disk_usage / conda_store_disk_total", @@ -574,7 +574,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -621,7 +621,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_disk_total / (2.0^30)", @@ -637,7 +637,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "fieldConfig": { "defaults": { @@ -688,7 +688,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "conda_store_disk_usage / (2^30)", diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/keycloak.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/keycloak.json index 41fea2ad23..8dbbd2d4f8 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/keycloak.json +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/keycloak.json @@ -403,7 +403,7 @@ "format": "time_series", "interval": "", "intervalFactor": 1, - "legendFormat": "Comitted", + "legendFormat": "Committed", "refId": "C" }, { diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/traefik.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/traefik.json index 503b48af7c..188491c73c 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/traefik.json +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/traefik.json @@ -31,7 +31,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": 
"PBFA97CFB590B2093" + "uid": "prometheus" }, "gridPos": { "h": 1, @@ -108,7 +108,7 @@ { "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "exemplar": true, "expr": "count(kube_pod_status_ready{namespace=\"$namespace\",condition=\"true\",pod=~\"nebari-traefik-ingress-.*\", job=\"kube-state-metrics\"})", @@ -211,7 +211,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "gridPos": { "h": 1, @@ -506,7 +506,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "gridPos": { "h": 1, @@ -806,7 +806,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "gridPos": { "h": 1, @@ -917,7 +917,7 @@ "collapsed": false, "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "gridPos": { "h": 1, @@ -1168,7 +1168,7 @@ }, "datasource": { "type": "prometheus", - "uid": "PBFA97CFB590B2093" + "uid": "prometheus" }, "definition": "label_values(kube_pod_container_info{pod=~\".*traefik.*\"}, namespace)", "hide": 0, diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/usage_report.json b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/usage_report.json index 6bef780268..71f32f5f95 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/usage_report.json +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/dashboards/Main/usage_report.json @@ -238,14 +238,14 @@ "datasource": { "uid": "$PROMETHEUS_DS" }, - "definition": "", + "definition": "label_values({service=\"hub\"},namespace)", "hide": 0, - "includeAll": true, + "includeAll": false, "multi": false, "name": "hub", "options": [], "query": { - "query": 
"label_values(kube_service_labels{service=\"hub\"}, namespace)", + "query": "label_values({service=\"hub\"},namespace)", "refId": "Prometheus-hub-Variable-Query" }, "refresh": 1, diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf index 869f616c71..3933b1f009 100644 --- a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/monitoring/main.tf @@ -3,16 +3,28 @@ resource "random_password" "grafana_admin_password" { special = false } +resource "kubernetes_secret" "grafana_oauth_secret" { + metadata { + name = "grafana-oauth-secret" + namespace = var.namespace + } + + data = { + "grafana-oauth-client-id" = module.grafana-client-id.config.client_id + "grafana-oauth-client-secret" = module.grafana-client-id.config.client_secret + } +} + resource "helm_release" "prometheus-grafana" { name = "nebari" namespace = var.namespace repository = "https://prometheus-community.github.io/helm-charts" chart = "kube-prometheus-stack" - version = "30.1.0" + version = "58.4.0" values = concat([ file("${path.module}/values.yaml"), - # https://github.com/prometheus-community/helm-charts/blob/kube-prometheus-stack-30.1.0/charts/kube-prometheus-stack/values.yaml + # https://github.com/prometheus-community/helm-charts/blob/kube-prometheus-stack-58.4.0/charts/kube-prometheus-stack/values.yaml jsonencode({ alertmanager = { alertmanagerSpec = { @@ -51,6 +63,19 @@ resource "helm_release" "prometheus-grafana" { "${var.node-group.key}" = var.node-group.value } additionalScrapeConfigs = [ + { + job_name = "kuberhealthy" + scrape_interval = "1m" + honor_labels = true + metrics_path = "/metrics" + static_configs = [ + { + targets = [ + "kuberhealthy.${var.namespace}.svc.cluster.local" + ] + } + ] + }, { job_name = "Keycloak 
Target" metrics_path = "/auth/realms/master/metrics" @@ -206,6 +231,8 @@ resource "helm_release" "prometheus-grafana" { } } + envFromSecret = kubernetes_secret.grafana_oauth_secret.metadata[0].name + "grafana.ini" : { server = { protocol = "http" @@ -222,8 +249,8 @@ resource "helm_release" "prometheus-grafana" { enabled = "true" name = "Login Keycloak" allow_sign_up = "true" - client_id = module.grafana-client-id.config.client_id - client_secret = module.grafana-client-id.config.client_secret + client_id = "$__env{grafana-oauth-client-id}" + client_secret = "$__env{grafana-oauth-client-secret}" scopes = "profile" auth_url = module.grafana-client-id.config.authentication_url token_url = module.grafana-client-id.config.token_url diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/cluster-values.yaml.tftpl b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/cluster-values.yaml.tftpl new file mode 100644 index 0000000000..2c1253b6e5 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/cluster-values.yaml.tftpl @@ -0,0 +1,163 @@ +# https://github.com/rook/rook/blob/v1.14.7/deploy/charts/rook-ceph-cluster/values.yaml +monitoring: + enabled: false # TODO: Enable monitoring when nebari-config.yaml has it enabled +toolbox: + enabled: false # for debugging purposes +cephBlockPools: [] +cephObjectStores: [] +cephClusterSpec: + cephConfig: + global: + osd_pool_default_size: "1" + mon_warn_on_pool_no_redundancy: "false" + bdev_flock_retry: "20" + bluefs_buffered_io: "false" + mon_data_avail_warn: "10" + placement: + additionalProperties: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: ${node_group.key} + operator: In + values: + - ${node_group.value} + # values from https://raw.githubusercontent.com/rook/rook/release-1.14/deploy/examples/cluster-on-pvc.yaml + dataDirHostPath: 
/var/lib/rook + mon: + # Set the number of mons to be started. Generally recommended to be 3. + # For highest availability, an odd number of mons should be specified. + count: 1 + allowMultiplePerNode: true + # A volume claim template can be specified in which case new monitors (and + # monitors created during fail over) will construct a PVC based on the + # template for the monitor's primary storage. Changes to the template do not + # affect existing monitors. Log data is stored on the HostPath under + # dataDirHostPath. If no storage requirement is specified, a default storage + # size appropriate for monitor data will be used. + volumeClaimTemplate: + spec: + %{ if storageClassName != null }storageClassName: ${storageClassName}%{ endif } + resources: + requests: + storage: 10Gi + cephVersion: + image: quay.io/ceph/ceph:v18.2.2 + allowUnsupported: false + mgr: + count: 1 + allowMultiplePerNode: true + modules: + - name: rook + enabled: true + dashboard: + enabled: true + ssl: false + crashCollector: + disable: true # false + logCollector: + enabled: true + periodicity: daily # one of: hourly, daily, weekly, monthly + maxLogSize: 500M # SUFFIX may be 'M' or 'G'. Must be at least 1M. + storage: + storageClassDeviceSets: + - name: set1 + # The number of OSDs to create from this device set + count: 1 + portable: true + tuneDeviceClass: true + tuneFastDeviceClass: true + # whether to encrypt the deviceSet or not + encrypted: false + # Since the OSDs could end up on any node, an effort needs to be made to spread the OSDs + # across nodes as much as possible. Unfortunately the pod anti-affinity breaks down + # as soon as you have more than one OSD per node. The topology spread constraints will + # give us an even spread on K8s 1.18 or newer. 
+ placement: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: ${node_group.key} + operator: In + values: + - ${node_group.value} + resources: + volumeClaimTemplates: + - metadata: + name: data + # if you are looking at giving your OSD a different CRUSH device class than the one detected by Ceph + # annotations: + # crushDeviceClass: hybrid + spec: + resources: + requests: + storage: ${storage_capacity_Gi}Gi # TODO: Look into auto resizing these as needed + # IMPORTANT: Change the storage class depending on your environment + %{ if storageClassName != null }storageClassName: ${storageClassName}%{ endif } + volumeMode: Block + accessModes: + - ReadWriteOnce + # when onlyApplyOSDPlacement is false, will merge both placement.All() and storageClassDeviceSets.Placement. + onlyApplyOSDPlacement: false + resources: + priorityClassNames: + # If there are multiple nodes available in a failure domain (e.g. zones), the + # mons and osds can be portable and set the system-cluster-critical priority class. 
+ mon: system-node-critical + osd: system-node-critical + mgr: system-cluster-critical + disruptionManagement: + managePodBudgets: true + osdMaintenanceTimeout: 30 + pgHealthCheckTimeout: 0 + +cephFileSystems: + - name: ceph-filesystem + # see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#filesystem-settings for available configuration + spec: + metadataPool: + replicated: + size: 1 + dataPools: + - failureDomain: host + replicated: + size: 1 + # Optional and highly recommended, 'data0' by default, see https://github.com/rook/rook/blob/master/Documentation/CRDs/Shared-Filesystem/ceph-filesystem-crd.md#pools + name: data0 + metadataServer: + activeCount: 1 + activeStandby: true + resources: + limits: + memory: "4Gi" + requests: + cpu: "1000m" + memory: "4Gi" + priorityClassName: system-cluster-critical + storageClass: + enabled: true + isDefault: false + name: ceph-filesystem + # (Optional) specify a data pool to use, must be the name of one of the data pools above, 'data0' by default + pool: data0 + reclaimPolicy: Delete + allowVolumeExpansion: true + volumeBindingMode: "Immediate" + annotations: { } + labels: { } + mountOptions: [] + # see https://github.com/rook/rook/blob/master/Documentation/Storage-Configuration/Shared-Filesystem-CephFS/filesystem-storage.md#provision-storage for available configuration + parameters: + # The secrets contain Ceph admin credentials. + csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/provisioner-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner + csi.storage.k8s.io/controller-expand-secret-namespace: "{{ .Release.Namespace }}" + csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node + csi.storage.k8s.io/node-stage-secret-namespace: "{{ .Release.Namespace }}" + # Specify the filesystem type of the volume. 
If not specified, csi-provisioner + # will set default as `ext4`. Note that `xfs` is not recommended due to potential deadlock + # in hyperconverged settings where the volume is mounted on the same node as the osds. + csi.storage.k8s.io/fstype: ext4 diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/main.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/main.tf new file mode 100644 index 0000000000..32be674561 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/main.tf @@ -0,0 +1,72 @@ +resource "helm_release" "rook-ceph-cluster" { + name = "rook-ceph-cluster" + namespace = var.namespace + repository = "https://charts.rook.io/release" + chart = "rook-ceph-cluster" + version = "v1.14.7" + wait = true + wait_for_jobs = true + + values = concat([ + templatefile("${path.module}/cluster-values.yaml.tftpl", + { + "storageClassName" = var.storage_class_name, + "node_group" = var.node_group, + "storage_capacity_Gi" = var.ceph_storage_capacity, + }), + jsonencode({ + operatorNamespace = var.operator_namespace, + }) + ], var.overrides) +} + +locals { + storage-class = data.kubernetes_storage_class.rook-ceph-fs-delete-sc + storage-class-base-name = "ceph-filesystem" +} + +data "kubernetes_storage_class" "rook-ceph-fs-delete-sc" { + metadata { + name = local.storage-class-base-name # TODO: Make sure we get this right + } + depends_on = [helm_release.rook-ceph-cluster] +} + +resource "kubernetes_storage_class" "ceph-retain-sc" { + metadata { + name = "${local.storage-class-base-name}-retain" # "ceph-filesystem-retain" # TODO: Make sure we get this right + } + storage_provisioner = local.storage-class.storage_provisioner # "rook-ceph.cephfs.csi.ceph.com" + reclaim_policy = "Retain" + volume_binding_mode = local.storage-class.volume_binding_mode + allow_volume_expansion = local.storage-class.allow_volume_expansion + parameters = 
local.storage-class.parameters + + depends_on = [data.kubernetes_storage_class.rook-ceph-fs-delete-sc] +} + +# This is necessary on GKE to completely create a ceph cluster +resource "kubernetes_resource_quota" "rook_critical_pods" { + metadata { + name = "rook-critical-pods" + namespace = var.namespace + labels = { + "addonmanager.kubernetes.io/mode" = "Reconcile" + } + } + + spec { + hard = { + "pods" = "1G" + } + + scope_selector { + match_expression { + operator = "In" + scope_name = "PriorityClass" + values = ["system-node-critical", "system-cluster-critical"] + } + } + } + # depends_on = [helm_release.rook-ceph] +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/operator-values.yaml b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/operator-values.yaml new file mode 100644 index 0000000000..d9155da1ef --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/operator-values.yaml @@ -0,0 +1 @@ +# https://github.com/rook/rook/blob/v1.14.7/deploy/charts/rook-ceph/values.yaml diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/variables.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/variables.tf new file mode 100644 index 0000000000..67969dc083 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/variables.tf @@ -0,0 +1,35 @@ +variable "namespace" { + description = "deploy rook-ceph operator in this namespace" + type = string +} + +variable "operator_namespace" { + description = "namespace where the rook-ceph operator is deployed" + type = string +} + + +variable "overrides" { + description = "Rook Ceph helm chart overrides" + type = list(string) + default = [] +} + +variable "storage_class_name" { + description = "Name of the storage class to create" + type = string + default = null +} + 
+variable "node_group" { + description = "Node key value pair for bound resources" + type = object({ + key = string + value = string + }) +} + +variable "ceph_storage_capacity" { + description = "Ceph storage capacity in Gi" + type = number +} diff --git a/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/versions.tf b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/versions.tf new file mode 100644 index 0000000000..341def1365 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/modules/kubernetes/services/rook-ceph/versions.tf @@ -0,0 +1,13 @@ +terraform { + required_providers { + helm = { + source = "hashicorp/helm" + version = "2.1.2" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "2.20.0" + } + } + required_version = ">= 1.0" +} diff --git a/src/_nebari/stages/kubernetes_services/template/rook-ceph.tf b/src/_nebari/stages/kubernetes_services/template/rook-ceph.tf new file mode 100644 index 0000000000..c40b6fae33 --- /dev/null +++ b/src/_nebari/stages/kubernetes_services/template/rook-ceph.tf @@ -0,0 +1,49 @@ +# ======================= VARIABLES ====================== +variable "rook_ceph_storage_class_name" { + description = "Name of the storage class to create" + type = string +} + +locals { + enable-ceph-cluster = local.jupyterhub-fs == "cephfs" || local.conda-store-fs == "cephfs" +} +# ====================== RESOURCES ======================= +module "rook-ceph" { + count = local.enable-ceph-cluster ? 1 : 0 + source = "./modules/kubernetes/services/rook-ceph" + namespace = var.environment + operator_namespace = var.environment + + storage_class_name = var.rook_ceph_storage_class_name + node_group = var.node_groups.general + ceph_storage_capacity = var.jupyterhub-shared-storage + var.conda-store-filesystem-storage + + depends_on = [helm_release.rook-ceph] +} + +resource "helm_release" "rook-ceph" { + count = local.enable-ceph-cluster ? 
1 : 0 + + name = "rook-ceph" + namespace = var.environment + repository = "https://charts.rook.io/release" + chart = "rook-ceph" + version = "v1.14.7" + + values = concat([ + file("./modules/kubernetes/services/rook-ceph/operator-values.yaml"), + jsonencode({ + nodeSelector = { + "${var.node_groups.general.key}" = var.node_groups.general.value + }, + monitoring = { + enabled = false # TODO: Enable monitoring when nebari-config.yaml has it enabled + }, + csi = { + enableRbdDriver = false, # necessary to provision block storage, but saves some cpu and memory if not needed + }, + }) + ], + # var.overrides # TODO: Add overrides for Rook-Ceph Operator + ) +} diff --git a/src/_nebari/stages/terraform_state/__init__.py b/src/_nebari/stages/terraform_state/__init__.py index edd4b9ed8a..37568be130 100644 --- a/src/_nebari/stages/terraform_state/__init__.py +++ b/src/_nebari/stages/terraform_state/__init__.py @@ -8,9 +8,11 @@ from pydantic import field_validator +from _nebari import utils from _nebari.provider import terraform from _nebari.provider.cloud import azure_cloud from _nebari.stages.base import NebariTerraformStage +from _nebari.stages.tf_objects import NebariConfig from _nebari.utils import ( AZURE_TF_STATE_RESOURCE_GROUP_SUFFIX, construct_azure_resource_group_name, @@ -170,8 +172,9 @@ def state_imports(self) -> List[Tuple[str, str]]: return [] def tf_objects(self) -> List[Dict]: + resources = [NebariConfig(self.config)] if self.config.provider == schema.ProviderEnum.gcp: - return [ + return resources + [ terraform.Provider( "google", project=self.config.google_cloud_platform.project, @@ -179,13 +182,13 @@ def tf_objects(self) -> List[Dict]: ), ] elif self.config.provider == schema.ProviderEnum.aws: - return [ + return resources + [ terraform.Provider( "aws", region=self.config.amazon_web_services.region ), ] else: - return [] + return resources def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): if self.config.provider == schema.ProviderEnum.do: @@ 
-231,7 +234,11 @@ def input_vars(self, stage_outputs: Dict[str, Dict[str, Any]]): def deploy( self, stage_outputs: Dict[str, Dict[str, Any]], disable_prompt: bool = False ): - with super().deploy(stage_outputs, disable_prompt): + self.check_immutable_fields() + + # No need to run terraform init here as it's being called when running the + # terraform show command, inside check_immutable_fields + with super().deploy(stage_outputs, disable_prompt, terraform_init=False): env_mapping = {} # DigitalOcean terraform remote state using Spaces Bucket # assumes aws credentials thus we set them to match spaces credentials @@ -246,6 +253,55 @@ def deploy( with modified_environ(**env_mapping): yield + def check_immutable_fields(self): + nebari_config_state = self.get_nebari_config_state() + if not nebari_config_state: + return + + # compute diff of remote/prior and current nebari config + nebari_config_diff = utils.JsonDiff( + nebari_config_state.model_dump(), self.config.model_dump() + ) + # check if any changed fields are immutable + for keys, old, new in nebari_config_diff.modified(): + bottom_level_schema = self.config + if len(keys) > 1: + for key in keys[:-1]: + try: + bottom_level_schema = getattr(bottom_level_schema, key) + except AttributeError as e: + if isinstance(bottom_level_schema, dict): + # handle case where value is a dict + bottom_level_schema = bottom_level_schema[key] + else: + raise e + extra_field_schema = schema.ExtraFieldSchema( + **bottom_level_schema.model_fields[keys[-1]].json_schema_extra or {} + ) + if extra_field_schema.immutable: + key_path = ".".join(keys) + raise ValueError( + f'Attempting to change immutable field "{key_path}" ("{old}"->"{new}") in Nebari config file. Immutable fields cannot be changed after initial deployment.' 
+ ) + + def get_nebari_config_state(self): + directory = str(self.output_directory / self.stage_prefix) + tf_state = terraform.show(directory) + nebari_config_state = None + + # get nebari config from state + for resource in ( + tf_state.get("values", {}).get("root_module", {}).get("resources", []) + ): + if resource["address"] == "terraform_data.nebari_config": + from nebari.plugins import nebari_plugin_manager + + nebari_config_state = nebari_plugin_manager.config_schema( + **resource["values"]["input"] + ) + break + return nebari_config_state + @contextlib.contextmanager def destroy( self, stage_outputs: Dict[str, Dict[str, Any]], status: Dict[str, bool] diff --git a/src/_nebari/stages/terraform_state/template/aws/modules/terraform-state/main.tf b/src/_nebari/stages/terraform_state/template/aws/modules/terraform-state/main.tf index 2b0561dd73..2931f153bf 100644 --- a/src/_nebari/stages/terraform_state/template/aws/modules/terraform-state/main.tf +++ b/src/_nebari/stages/terraform_state/template/aws/modules/terraform-state/main.tf @@ -1,3 +1,7 @@ +resource "aws_kms_key" "tf-state-key" { + enable_key_rotation = true +} + resource "aws_s3_bucket" "terraform-state" { bucket = "${var.name}-terraform-state" @@ -16,6 +20,28 @@ resource "aws_s3_bucket" "terraform-state" { } } +resource "aws_s3_bucket_public_access_block" "terraform-state" { + bucket = aws_s3_bucket.terraform-state.id + ignore_public_acls = true + block_public_acls = true + block_public_policy = true + restrict_public_buckets = true +} + +resource "aws_s3_bucket_server_side_encryption_configuration" "terraform-state" { + bucket = aws_s3_bucket.terraform-state.id + + rule { + apply_server_side_encryption_by_default { + kms_master_key_id = aws_kms_key.tf-state-key.arn + sse_algorithm = "aws:kms" + } + } + # // AWS may return HTTP 409 if PutBucketEncryption is called immediately after S3 + # bucket creation. Adding dependency avoids concurrent requests. 
+ depends_on = [aws_s3_bucket_public_access_block.terraform-state] +} + resource "aws_dynamodb_table" "terraform-state-lock" { name = "${var.name}-terraform-state-lock" diff --git a/src/_nebari/stages/terraform_state/template/azure/modules/terraform-state/main.tf b/src/_nebari/stages/terraform_state/template/azure/modules/terraform-state/main.tf index a13f613cf9..544aa578c7 100644 --- a/src/_nebari/stages/terraform_state/template/azure/modules/terraform-state/main.tf +++ b/src/_nebari/stages/terraform_state/template/azure/modules/terraform-state/main.tf @@ -12,6 +12,7 @@ resource "azurerm_storage_account" "terraform-state-storage-account" { account_tier = "Standard" account_replication_type = "GRS" tags = var.tags + min_tls_version = "TLS1_2" identity { type = "SystemAssigned" diff --git a/src/_nebari/stages/tf_objects.py b/src/_nebari/stages/tf_objects.py index f5911a72cb..04c6d434aa 100644 --- a/src/_nebari/stages/tf_objects.py +++ b/src/_nebari/stages/tf_objects.py @@ -1,4 +1,4 @@ -from _nebari.provider.terraform import Data, Provider, TerraformBackend +from _nebari.provider.terraform import Data, Provider, Resource, TerraformBackend from _nebari.utils import ( AZURE_TF_STATE_RESOURCE_GROUP_SUFFIX, construct_azure_resource_group_name, @@ -17,7 +17,6 @@ def NebariKubernetesProvider(nebari_config: schema.Main): Provider("aws", region=nebari_config.amazon_web_services.region), Provider( "kubernetes", - experiments={"manifest_resource": True}, host="${data.aws_eks_cluster.default.endpoint}", cluster_ca_certificate="${base64decode(data.aws_eks_cluster.default.certificate_authority[0].data)}", token="${data.aws_eks_cluster_auth.default.token}", @@ -25,7 +24,6 @@ def NebariKubernetesProvider(nebari_config: schema.Main): ) return Provider( "kubernetes", - experiments={"manifest_resource": True}, ) @@ -117,3 +115,7 @@ def NebariTerraformState(directory: str, nebari_config: schema.Main): ) else: raise NotImplementedError("state not implemented") + + +def 
NebariConfig(nebari_config: schema.Main): + return Resource("terraform_data", "nebari_config", input=nebari_config.model_dump()) diff --git a/src/_nebari/subcommands/deploy.py b/src/_nebari/subcommands/deploy.py index 0aa861027f..fe4cddf1df 100644 --- a/src/_nebari/subcommands/deploy.py +++ b/src/_nebari/subcommands/deploy.py @@ -84,6 +84,11 @@ def deploy( stages.remove(stage) rich.print("Skipping remote state provision") + # Digital Ocean support deprecation warning -- Nebari 2024.7.1 + if config.provider == "do" and not disable_prompt: + msg = "Digital Ocean support is currently being deprecated and will be removed in a future release. Would you like to continue?" + typer.confirm(msg) + deploy_configuration( config, stages, diff --git a/src/_nebari/subcommands/init.py b/src/_nebari/subcommands/init.py index 9040f3d201..743d30cb40 100644 --- a/src/_nebari/subcommands/init.py +++ b/src/_nebari/subcommands/init.py @@ -106,6 +106,7 @@ class InitInputs(schema.Base): ssl_cert_email: Optional[schema.email_pydantic] = None disable_prompt: bool = False output: pathlib.Path = pathlib.Path("nebari-config.yaml") + explicit: int = 0 def enum_to_list(enum_cls): @@ -152,7 +153,7 @@ def handle_init(inputs: InitInputs, config_schema: BaseModel): try: write_configuration( inputs.output, - config, + config if not inputs.explicit else config_schema(**config), mode="x", ) except FileExistsError: @@ -565,6 +566,13 @@ def init( "-o", help="Output file path for the rendered config file.", ), + explicit: int = typer.Option( + 0, + "--explicit", + "-e", + count=True, + help="Write explicit nebari config file (advanced users only).", + ), ): """ Create and initialize your [purple]nebari-config.yaml[/purple] file. 
@@ -587,6 +595,13 @@ def init( inputs.cloud_provider = check_cloud_provider_creds( cloud_provider, disable_prompt ) + + # Digital Ocean deprecation warning -- Nebari 2024.7.1 + if inputs.cloud_provider == ProviderEnum.do.value.lower(): + rich.print( + ":warning: Digital Ocean support is being deprecated and support will be removed in the future. :warning:\n" + ) + inputs.region = check_cloud_provider_region(region, inputs.cloud_provider) inputs.kubernetes_version = check_cloud_provider_kubernetes_version( kubernetes_version, inputs.cloud_provider, inputs.region @@ -604,6 +619,7 @@ def init( inputs.ssl_cert_email = ssl_cert_email inputs.disable_prompt = disable_prompt inputs.output = output + inputs.explicit = explicit from nebari.plugins import nebari_plugin_manager @@ -653,6 +669,7 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): "\n\t❗️ [purple]local[/purple] requires Docker and Kubernetes running on your local machine. " "[italic]Currently only available on Linux OS.[/italic]" "\n\t❗️ [purple]existing[/purple] refers to an existing Kubernetes cluster that Nebari can be deployed on.\n" + "\n\t❗️ [red]Digital Ocean[/red] is currently being deprecated and support will be removed in the future.\n" ) ) # try: @@ -726,7 +743,7 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): # DOMAIN NAME rich.print( ( - "\n\n 🪴 Great! Now you can provide a valid domain name (i.e. the URL) to access your Nebri instance. " + "\n\n 🪴 Great! Now you can provide a valid domain name (i.e. the URL) to access your Nebari instance. " "This should be a domain that you own. Default if unspecified is the IP of the load balancer.\n\n" ) ) @@ -795,21 +812,9 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): qmark=qmark, ).unsafe_ask() - org_name = questionary.text( - f"Which user or organization will this repository live under? 
({repo_url.format(git_provider=git_provider, org_name='', repo_name='')})", - qmark=qmark, - ).unsafe_ask() - - repo_name = questionary.text( - f"And what will the name of this repository be? ({repo_url.format(git_provider=git_provider, org_name=org_name, repo_name='')})", - qmark=qmark, - ).unsafe_ask() - - inputs.repository = repo_url.format( - git_provider=git_provider, org_name=org_name, repo_name=repo_name - ) - if git_provider == GitRepoEnum.github.value.lower(): + inputs.ci_provider = CiEnum.github_actions.value.lower() + inputs.repository_auto_provision = questionary.confirm( f"Would you like nebari to create a remote repository on {git_provider}?", default=False, @@ -817,11 +822,26 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): auto_enter=False, ).unsafe_ask() - if not disable_checks and inputs.repository_auto_provision: - check_repository_creds(ctx, git_provider) + if inputs.repository_auto_provision: + org_name = questionary.text( + f"Which user or organization will this repository live under? ({repo_url.format(git_provider=git_provider, org_name='', repo_name='')})", + qmark=qmark, + ).unsafe_ask() + + repo_name = questionary.text( + f"And what will the name of this repository be? 
({repo_url.format(git_provider=git_provider, org_name=org_name, repo_name='')})", + qmark=qmark, + ).unsafe_ask() + + inputs.repository = repo_url.format( + git_provider=git_provider, + org_name=org_name, + repo_name=repo_name, + ) + + if not disable_checks: + check_repository_creds(ctx, git_provider) - if git_provider == GitRepoEnum.github.value.lower(): - inputs.ci_provider = CiEnum.github_actions.value.lower() elif git_provider == GitRepoEnum.gitlab.value.lower(): inputs.ci_provider = CiEnum.gitlab_ci.value.lower() @@ -894,6 +914,14 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): ) inputs.kubernetes_version = kubernetes_version + # EXPLICIT CONFIG + inputs.explicit = questionary.confirm( + "Would you like the nebari config to show all available options? (recommended for advanced users only)", + default=False, + qmark=qmark, + auto_enter=False, + ).unsafe_ask() + from nebari.plugins import nebari_plugin_manager config_schema = nebari_plugin_manager.config_schema @@ -909,16 +937,19 @@ def guided_init_wizard(ctx: typer.Context, guided_init: str): ) ) - base_cmd = f"nebari init {inputs.cloud_provider}" + base_cmd = f"nebari init {inputs.cloud_provider.value}" def if_used(key, model=inputs, ignore_list=["cloud_provider"]): if key not in ignore_list: - b = "--{key} {value}" value = getattr(model, key) - if isinstance(value, str) and (value != "" or value is not None): - return b.format(key=key, value=value).replace("_", "-") - if isinstance(value, bool) and value: - return b.format(key=key, value=value).replace("_", "-") + if isinstance(value, enum.Enum): + return f"--{key} {value.value}".replace("_", "-") + elif isinstance(value, bool): + if value: + return f"--{key}".replace("_", "-") + elif isinstance(value, (int, str)): + if value: + return f"--{key} {value}".replace("_", "-") cmds = " ".join( [ diff --git a/src/_nebari/upgrade.py b/src/_nebari/upgrade.py index e35d7ea309..6536612f2d 100644 --- a/src/_nebari/upgrade.py +++ b/src/_nebari/upgrade.py 
@@ -1,3 +1,9 @@ +""" +This file contains the upgrade logic for Nebari. +Each release of Nebari requires an upgrade step class (which is a child class of UpgradeStep) to be created. +When a user runs `nebari upgrade -c nebari-config.yaml`, then the do_upgrade function will then run through all required upgrade steps to bring the config file up to date with the current version of Nebari. +""" + import json import logging import re @@ -8,12 +14,17 @@ from pathlib import Path from typing import Any, ClassVar, Dict +import kubernetes.client +import kubernetes.config +import requests import rich from packaging.version import Version from pydantic import ValidationError from rich.prompt import Prompt +from typing_extensions import override from _nebari.config import backup_configuration +from _nebari.keycloak import get_keycloak_admin from _nebari.stages.infrastructure import ( provider_enum_default_node_groups_map, provider_enum_name_map, @@ -39,6 +50,20 @@ def do_upgrade(config_filename, attempt_fixes=False): + """ + Perform an upgrade of the Nebari configuration file. + + This function loads the YAML configuration file, checks for deprecated keys, + validates the current version, and if necessary, upgrades the configuration + to the latest version of Nebari. + + Args: + config_filename (str): The path to the configuration file. + attempt_fixes (bool): Whether to attempt automatic fixes for validation errors. + + Returns: + None + """ config = load_yaml(config_filename) if config.get("qhub_version"): rich.print( @@ -87,10 +112,24 @@ def do_upgrade(config_filename, attempt_fixes=False): class UpgradeStep(ABC): + """ + Abstract base class representing an upgrade step. + + Attributes: + _steps (ClassVar[Dict[str, Any]]): Class variable holding registered upgrade steps. + version (ClassVar[str]): The version of the upgrade step. 
+ """ + _steps: ClassVar[Dict[str, Any]] = {} version: ClassVar[str] = "" def __init_subclass__(cls): + """ + Initializes a subclass of UpgradeStep. + + This method validates the version string and registers the subclass + in the _steps dictionary. + """ try: parsed_version = Version(cls.version) except ValueError as exc: @@ -112,6 +151,15 @@ def clear_steps_registry(cls): @classmethod def has_step(cls, version): + """ + Checks if there is an upgrade step for a given version. + + Args: + version (str): The version to check. + + Returns: + bool: True if the step exists, False otherwise. + """ return version in cls._steps @classmethod @@ -121,6 +169,16 @@ def upgrade( """ Runs through all required upgrade steps (i.e. relevant subclasses of UpgradeStep). Calls UpgradeStep.upgrade_step for each. + + Args: + config (dict): The current configuration dictionary. + start_version (str): The starting version of the configuration. + finish_version (str): The target version for the configuration. + config_filename (str): The path to the configuration file. + attempt_fixes (bool): Whether to attempt automatic fixes for validation errors. + + Returns: + dict: The updated configuration dictionary. """ starting_ver = rounded_ver_parse(start_version or "0.0.0") finish_ver = rounded_ver_parse(finish_version) @@ -156,9 +214,19 @@ def upgrade( return config def get_version(self): + """ + Returns: + str: The version of the upgrade step. + """ return self.version def requires_nebari_version_field(self): + """ + Checks if the nebari_version field is required for this upgrade step. + + Returns: + bool: True if the nebari_version field is required, False otherwise. + """ return rounded_ver_parse(self.version) > rounded_ver_parse("0.3.13") def upgrade_step(self, config, start_version, config_filename, *args, **kwargs): @@ -174,6 +242,14 @@ def upgrade_step(self, config, start_version, config_filename, *args, **kwargs): It should normally be left as-is for all upgrades. 
Use _version_specific_upgrade below for any actions that are only required for the particular upgrade you are creating. + + Args: + config (dict): The current configuration dictionary. + start_version (str): The starting version of the configuration. + config_filename (str): The path to the configuration file. + + Returns: + dict: The updated configuration dictionary. """ finish_version = self.get_version() __rounded_finish_version__ = str(rounded_ver_parse(finish_version)) @@ -191,11 +267,32 @@ def upgrade_step(self, config, start_version, config_filename, *args, **kwargs): config["nebari_version"] = self.version def contains_image_and_tag(s: str) -> bool: - # match on `quay.io/nebari/nebari-<...>:YYYY.MM.XX`` + """ + Check if the string matches the Nebari image pattern. + + Args: + s (str): The string to check. + + Returns: + bool: True if the string matches the pattern, False otherwise. + """ pattern = r"^quay\.io\/nebari\/nebari-(jupyterhub|jupyterlab|dask-worker)(-gpu)?:\d{4}\.\d+\.\d+$" return bool(re.match(pattern, s)) - def replace_image_tag_legacy(image, start_version, new_version): + def replace_image_tag_legacy( + image: str, start_version: str, new_version: str + ) -> str: + """ + Replace legacy image tags with the new version. + + Args: + image (str): The current image string. + start_version (str): The starting version of the image. + new_version (str): The new version to replace with. + + Returns: + str: The updated image string with the new version, or None if no match. + """ start_version_regex = start_version.replace(".", "\\.") if not start_version: start_version_regex = "0\\.[0-3]\\.[0-9]{1,2}" @@ -210,6 +307,17 @@ def replace_image_tag_legacy(image, start_version, new_version): return None def replace_image_tag(s: str, new_version: str, config_path: str) -> str: + """ + Replace the image tag with the new version. + + Args: + s (str): The current image string. + new_version (str): The new version to replace with. 
+ config_path (str): The path to the configuration file. + + Returns: + str: The updated image string with the new version, or the original string if no changes. + """ legacy_replacement = replace_image_tag_legacy(s, start_version, new_version) if legacy_replacement: return legacy_replacement @@ -230,6 +338,17 @@ def replace_image_tag(s: str, new_version: str, config_path: str) -> str: return s def set_nested_item(config: dict, config_path: list, value: str): + """ + Set a nested item in the configuration dictionary. + + Args: + config (dict): The configuration dictionary. + config_path (list): The path to the item to set. + value (str): The value to set. + + Returns: + None + """ config_path = config_path.split(".") for k in config_path[:-1]: try: @@ -243,7 +362,21 @@ def set_nested_item(config: dict, config_path: list, value: str): pass config[config_path[-1]] = value - def update_image_tag(config, config_path, current_image, new_version): + def update_image_tag( + config: dict, config_path: str, current_image: str, new_version: str + ) -> dict: + """ + Update the image tag in the configuration. + + Args: + config (dict): The configuration dictionary. + config_path (str): The path to the item to update. + current_image (str): The current image string. + new_version (str): The new version to replace with. + + Returns: + dict: The updated configuration dictionary. + """ new_image = replace_image_tag(current_image, new_version, config_path) if new_image != current_image: set_nested_item(config, config_path, new_image) @@ -288,7 +421,17 @@ def _version_specific_upgrade( self, config, start_version, config_filename, *args, **kwargs ): """ + Perform version-specific upgrade tasks. + Override this method in subclasses if you need to do anything specific to your version. + + Args: + config (dict): The current configuration dictionary. + start_version (str): The starting version of the configuration. + config_filename (str): The path to the configuration file. 
+ + Returns: + dict: The updated configuration dictionary. """ return config @@ -296,6 +439,7 @@ def _version_specific_upgrade( class Upgrade_0_3_12(UpgradeStep): version = "0.3.12" + @override def _version_specific_upgrade( self, config, start_version, config_filename, *args, **kwargs ): @@ -316,11 +460,13 @@ def _version_specific_upgrade( class Upgrade_0_4_0(UpgradeStep): version = "0.4.0" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): """ - Upgrade to Keycloak. + This version of Nebari introduces Keycloak for authentication, removes deprecated fields, + and generates a default password for the Keycloak root user. """ security = config.get("security", {}) users = security.get("users", {}) @@ -448,6 +594,7 @@ def _version_specific_upgrade( class Upgrade_0_4_1(UpgradeStep): version = "0.4.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -474,6 +621,7 @@ def _version_specific_upgrade( class Upgrade_2023_4_2(UpgradeStep): version = "2023.4.2" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -508,6 +656,7 @@ def _version_specific_upgrade( class Upgrade_2023_7_1(UpgradeStep): version = "2023.7.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -526,6 +675,7 @@ def _version_specific_upgrade( class Upgrade_2023_7_2(UpgradeStep): version = "2023.7.2" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -547,11 +697,22 @@ def _version_specific_upgrade( class Upgrade_2023_10_1(UpgradeStep): + """ + Upgrade step for Nebari version 2023.10.1 + + Note: + Upgrading to 2023.10.1 is considered high-risk because it includes a major refactor + to introduce the extension mechanism system. 
This version introduces significant + changes, including the support for third-party plugins, upgrades JupyterHub to version 3.1, + and deprecates certain components such as CDS Dashboards, ClearML, Prefect, and kbatch. + """ + version = "2023.10.1" # JupyterHub Helm chart 2.0.0 (app version 3.0.0) requires K8S Version >=1.23. (reference: https://z2jh.jupyter.org/en/stable/) # This release has been tested against 1.26 min_k8s_version = 1.26 + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -654,8 +815,16 @@ def _version_specific_upgrade( class Upgrade_2023_11_1(UpgradeStep): + """ + Upgrade step for Nebari version 2023.11.1 + + Note: + - ClearML, Prefect, and kbatch are no longer supported in this version. + """ + version = "2023.11.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -672,8 +841,16 @@ def _version_specific_upgrade( class Upgrade_2023_12_1(UpgradeStep): + """ + Upgrade step for Nebari version 2023.12.1 + + Note: + - This is the last version that supports the jupyterlab-videochat extension. + """ + version = "2023.12.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -691,8 +868,16 @@ def _version_specific_upgrade( class Upgrade_2024_1_1(UpgradeStep): + """ + Upgrade step for Nebari version 2024.1.1 + + Note: + - jupyterlab-videochat, retrolab, jupyter-tensorboard, jupyterlab-conda-store, and jupyter-nvdashboard are no longer supported. 
+ """ + version = "2024.1.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -713,6 +898,7 @@ def _version_specific_upgrade( class Upgrade_2024_3_1(UpgradeStep): version = "2024.3.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -724,6 +910,7 @@ def _version_specific_upgrade( class Upgrade_2024_3_2(UpgradeStep): version = "2024.3.2" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -735,6 +922,7 @@ def _version_specific_upgrade( class Upgrade_2024_3_3(UpgradeStep): version = "2024.3.3" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -744,8 +932,16 @@ def _version_specific_upgrade( class Upgrade_2024_4_1(UpgradeStep): + """ + Upgrade step for Nebari version 2024.4.1 + + Note: + - Adds default configuration for node groups if not already defined. + """ + version = "2024.4.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -777,6 +973,7 @@ def _version_specific_upgrade( class Upgrade_2024_5_1(UpgradeStep): version = "2024.5.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): @@ -786,11 +983,137 @@ def _version_specific_upgrade( class Upgrade_2024_6_1(UpgradeStep): + """ + Upgrade step for version 2024.6.1 + + This upgrade includes: + - Manual updates for kube-prometheus-stack CRDs if monitoring is enabled. + - Prompts to upgrade GCP node groups to more cost-efficient instances. 
+ """ + version = "2024.6.1" + @override def _version_specific_upgrade( self, config, start_version, config_filename: Path, *args, **kwargs ): + # Prompt users to manually update kube-prometheus-stack CRDs if monitoring is enabled + if config.get("monitoring", {}).get("enabled", True): + + crd_urls = [ + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagerconfigs.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_alertmanagers.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_podmonitors.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_probes.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_prometheusrules.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_scrapeconfigs.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_servicemonitors.yaml", + "https://raw.githubusercontent.com/prometheus-operator/prometheus-operator/v0.73.0/example/prometheus-operator-crd/monitoring.coreos.com_thanosrulers.yaml", + ] + daemonset_name = "prometheus-node-exporter" + namespace = config.get("namespace", 
"default") + + # We're upgrading from version 30.1.0 to 58.4.0. This is a major upgrade and requires manual intervention. + # See https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/README.md#upgrading-chart + # for more information on why the following commands are necessary. + commands = "[cyan bold]" + for url in crd_urls: + commands += f"kubectl apply --server-side --force-conflicts -f {url}\n" + commands += f"kubectl delete daemonset -l app={daemonset_name} --namespace {namespace}\n" + commands += "[/cyan bold]" + + rich.print( + "\n ⚠️ Warning ⚠️" + "\n-> [red bold]Nebari version 2024.6.1 comes with a new version of Grafana. Any custom dashboards that you created will be deleted after upgrading Nebari. Make sure to [link=https://grafana.com/docs/grafana/latest/dashboards/share-dashboards-panels/#export-a-dashboard-as-json]export them as JSON[/link] so you can [link=https://grafana.com/docs/grafana/latest/dashboards/build-dashboards/import-dashboards/#import-a-dashboard]import them[/link] again afterwards.[/red bold]" + f"\n-> [red bold]Before upgrading, kube-prometheus-stack CRDs need to be updated and the {daemonset_name} daemonset needs to be deleted.[/red bold]" + ) + run_commands = Prompt.ask( + "\nDo you want Nebari to update the kube-prometheus-stack CRDs and delete the prometheus-node-exporter for you? If not, you'll have to do it manually.", + choices=["y", "N"], + default="N", + ) + + # By default, rich wraps lines by splitting them into multiple lines. This is + # far from ideal, as users copy-pasting the commands will get errors when running them. + # To avoid this, we use a rich console with a larger width to print the entire commands + # and let the terminal wrap them if needed. 
+ console = rich.console.Console(width=220) + if run_commands == "y": + try: + kubernetes.config.load_kube_config() + except kubernetes.config.config_exception.ConfigException: + rich.print( + "[red bold]No default kube configuration file was found. Make sure to [link=https://www.nebari.dev/docs/how-tos/debug-nebari#generating-the-kubeconfig]have one pointing to your Nebari cluster[/link] before upgrading.[/red bold]" + ) + exit() + current_kube_context = kubernetes.config.list_kube_config_contexts()[1] + cluster_name = current_kube_context["context"]["cluster"] + rich.print( + f"The following commands will be run for the [cyan bold]{cluster_name}[/cyan bold] cluster" + ) + Prompt.ask("Hit enter to show the commands") + console.print(commands) + + Prompt.ask("Hit enter to continue") + # We need to add a special constructor to the yaml loader to handle a specific + # tag as otherwise the kubernetes API will fail when updating the CRD. + yaml.constructor.add_constructor( + "tag:yaml.org,2002:value", lambda loader, node: node.value + ) + for url in crd_urls: + response = requests.get(url) + response.raise_for_status() + crd = yaml.load(response.text) + crd_name = crd["metadata"]["name"] + api_instance = kubernetes.client.ApiextensionsV1Api() + try: + api_response = api_instance.read_custom_resource_definition( + name=crd_name + ) + except kubernetes.client.exceptions.ApiException: + api_response = api_instance.create_custom_resource_definition( + body=crd + ) + else: + api_response = api_instance.patch_custom_resource_definition( + name=crd["metadata"]["name"], body=crd + ) + + api_instance = kubernetes.client.AppsV1Api() + api_response = api_instance.list_namespaced_daemon_set( + namespace=namespace, label_selector=f"app={daemonset_name}" + ) + if api_response.items: + api_instance.delete_namespaced_daemon_set( + name=api_response.items[0].metadata.name, + namespace=namespace, + ) + + rich.print( + f"The kube-prometheus-stack CRDs have been updated and the 
{daemonset_name} daemonset has been deleted." + ) + else: + rich.print( + "[red bold]Before upgrading, you need to manually delete the prometheus-node-exporter daemonset and update the kube-prometheus-stack CRDs. To do that, please run the following commands.[/red bold]" + ) + Prompt.ask("Hit enter to show the commands") + console.print(commands) + + Prompt.ask("Hit enter to continue") + continue_ = Prompt.ask( + f"Have you backed up your custom dashboards (if necessary), deleted the {daemonset_name} daemonset and updated the kube-prometheus-stack CRDs?", + choices=["y", "N"], + default="N", + ) + if not continue_ == "y": + rich.print( + f"[red bold]You must back up your custom dashboards (if necessary), delete the {daemonset_name} daemonset and update the kube-prometheus-stack CRDs before upgrading to [green]{self.version}[/green] (or later).[/red bold]" + ) + exit() + + # Prompt users to upgrade to the new default node groups for GCP if (provider := config.get("provider", "")) == ProviderEnum.gcp.value: provider_full_name = provider_enum_name_map[provider] if not config.get(provider_full_name, {}).get("node_groups", {}): @@ -800,6 +1123,9 @@ def _version_specific_upgrade( The default node groups for GCP have been changed to cost efficient e2 family nodes reducing the running cost of Nebari on GCP by ~50%. This change will affect your current deployment, and will result in ~15 minutes of downtime during the upgrade step as the node groups are switched out, but shouldn't result in data loss. + [red bold]Note: If upgrading to the new node types, the upgrade process will take longer than usual. For this upgrade only, you'll likely see a timeout \ + error and need to restart the deployment process afterwards in order to upgrade successfully.[/red bold] + As always, make sure to backup data before upgrading. See https://www.nebari.dev/docs/how-tos/manual-backup for more information. 
Would you like to upgrade to the cost effective node groups [purple]{config_filename}[/purple]? @@ -857,6 +1183,151 @@ def _version_specific_upgrade( return config +class Upgrade_2024_7_1(UpgradeStep): + """ + Upgrade step for Nebari version 2024.7.1 + + Note: + - Digital Ocean deprecation warning. + """ + + version = "2024.7.1" + + @override + def _version_specific_upgrade( + self, config, start_version, config_filename: Path, *args, **kwargs + ): + if config.get("provider", "") == ProviderEnum.do.value: + rich.print("\n ⚠️ Deprecation Warning ⚠️") + rich.print( + "-> Digital Ocean support is currently being deprecated and will be removed in a future release.", + ) + rich.print("") + return config + + +class Upgrade_2024_9_1(UpgradeStep): + """ + Upgrade step for Nebari version 2024.9.1 + + """ + + version = "2024.9.1" + + @override + def _version_specific_upgrade( + self, config, start_version, config_filename: Path, *args, **kwargs + ): + if config.get("provider", "") == ProviderEnum.azure.value: + rich.print("\n ⚠️ Upgrade Warning ⚠️") + rich.print( + textwrap.dedent( + """ + -> Please ensure no users are currently logged in prior to deploying this update. The node groups will be destroyed and recreated during the deployment process causing a downtime of approximately 15 minutes. + + Due to an upstream issue, Azure Nebari deployments may raise an error when deploying for the first time after this upgrade. Waiting for a few minutes and then re-running `nebari deploy` should resolve the issue. 
More info can be found at [green][link=https://github.com/nebari-dev/nebari/issues/2640]issue #2640[/link][/green].""" + ), + ) + rich.print("") + elif config.get("provider", "") == ProviderEnum.do.value: + rich.print("\n ⚠️ Deprecation Warning ⚠️") + rich.print( + "-> Digital Ocean support is currently being deprecated and will be removed in a future release.", + ) + rich.print("") + + rich.print("\n ⚠️ Upgrade Warning ⚠️") + + text = textwrap.dedent( + """ + Please ensure no users are currently logged in prior to deploying this + update. + + Nebari [green]2024.9.1[/green] introduces changes to how group + directories are mounted in JupyterLab pods. + + Previously, every Keycloak group in the Nebari realm automatically created a + shared directory at ~/shared/, accessible to all group members + in their JupyterLab pods. + + Starting with Nebari [green]2024.9.1[/green], only groups assigned the + JupyterHub client role [magenta]allow-group-directory-creation[/magenta] will have their + directories mounted. + + By default, the admin, analyst, and developer groups will have this + role assigned during the upgrade. For other groups, you'll now need to + assign this role manually in the Keycloak UI to have their directories + mounted. + + For more details check our [green][link=https://www.nebari.dev/docs/references/release/]release notes[/link][/green]. 
+ """ + ) + rich.print(text) + keycloak_admin = None + + # Prompt the user for role assignment (if yes, transforms the response into bool) + assign_roles = ( + Prompt.ask( + "[bold]Would you like Nebari to assign the corresponding role to all of your current groups automatically?[/bold]", + choices=["y", "N"], + default="N", + ).lower() + == "y" + ) + + if assign_roles: + # In case this is done with a local deployment + import urllib3 + + urllib3.disable_warnings() + + keycloak_admin = get_keycloak_admin( + server_url=f"https://{config['domain']}/auth/", + username="root", + password=config["security"]["keycloak"]["initial_root_password"], + ) + + # Proceed with updating group permissions + client_id = keycloak_admin.get_client_id("jupyterhub") + role_name = "allow-group-directory-creation-role" + role_id = keycloak_admin.get_client_role_id( + client_id=client_id, role_name=role_name + ) + role_representation = keycloak_admin.get_role_by_id(role_id=role_id) + + # Fetch all groups and groups with the role + all_groups = keycloak_admin.get_groups() + groups_with_role = keycloak_admin.get_client_role_groups( + client_id=client_id, role_name=role_name + ) + groups_with_role_ids = {group["id"] for group in groups_with_role} + + # Identify groups without the role + groups_without_role = [ + group for group in all_groups if group["id"] not in groups_with_role_ids + ] + + if groups_without_role: + group_names = ", ".join(group["name"] for group in groups_without_role) + rich.print( + f"\n[bold]Updating the following groups with the required permissions:[/bold] {group_names}\n" + ) + for group in groups_without_role: + keycloak_admin.assign_group_client_roles( + group_id=group["id"], + client_id=client_id, + roles=[role_representation], + ) + rich.print( + "\n[green]Group permissions have been updated successfully.[/green]" + ) + else: + rich.print( + "\n[green]All groups already have the required permissions.[/green]" + ) + return config + + __rounded_version__ = 
str(rounded_ver_parse(__version__)) # Manually-added upgrade steps must go above this line @@ -864,4 +1335,11 @@ def _version_specific_upgrade( # Always have a way to upgrade to the latest full version number, even if no customizations # Don't let dev/prerelease versions cloud things class UpgradeLatest(UpgradeStep): + """ + Upgrade step for the latest available version. + + This class ensures there is always an upgrade path to the latest version, + even if no specific upgrade steps are defined for the current version. + """ + version = __rounded_version__ diff --git a/src/_nebari/utils.py b/src/_nebari/utils.py index 3ae4ad4bd8..5f0877666a 100644 --- a/src/_nebari/utils.py +++ b/src/_nebari/utils.py @@ -1,5 +1,7 @@ import contextlib +import enum import functools +import json import os import re import secrets @@ -11,7 +13,7 @@ import time import warnings from pathlib import Path -from typing import Dict, List, Set +from typing import Any, Dict, List, Set from ruamel.yaml import YAML @@ -44,14 +46,18 @@ def change_directory(directory): os.chdir(current_directory) -def run_subprocess_cmd(processargs, **kwargs): +def run_subprocess_cmd(processargs, prefix=b"", capture_output=False, **kwargs): """Runs subprocess command with realtime stdout logging with optional line prefix.""" - if "prefix" in kwargs: - line_prefix = f"[{kwargs['prefix']}]: ".encode("utf-8") - kwargs.pop("prefix") + if prefix: + line_prefix = f"[{prefix}]: ".encode("utf-8") else: line_prefix = b"" + if capture_output: + stderr_stream = subprocess.PIPE + else: + stderr_stream = subprocess.STDOUT + timeout = 0 if "timeout" in kwargs: timeout = kwargs.pop("timeout") # in seconds @@ -62,7 +68,7 @@ def run_subprocess_cmd(processargs, **kwargs): processargs, **kwargs, stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, + stderr=stderr_stream, preexec_fn=os.setsid, ) # Set timeout thread @@ -78,7 +84,8 @@ def kill_process(): timeout_timer = threading.Timer(timeout, kill_process) timeout_timer.start() - for 
line in iter(lambda: process.stdout.readline(), b""): + print_stream = process.stderr if capture_output else process.stdout + for line in iter(lambda: print_stream.readline(), b""): full_line = line_prefix + line if strip_errors: full_line = full_line.decode("utf-8") @@ -89,15 +96,26 @@ def kill_process(): sys.stdout.buffer.write(full_line) sys.stdout.flush() + print_stream.close() + + output = [] + if capture_output: + for line in iter(lambda: process.stdout.readline(), b""): + output.append(line) + process.stdout.close() if timeout_timer is not None: timeout_timer.cancel() - process.stdout.close() - return process.wait( + exit_code = process.wait( timeout=10 ) # Should already have finished because we have drained stdout + if capture_output: + return exit_code, b"".join(output) + else: + return exit_code, None + def load_yaml(config_filename: Path): """ @@ -353,3 +371,110 @@ def check_environment_variables(variables: Set[str], reference: str) -> None: f"""Missing the following required environment variables: {required_variables}\n Please see the documentation for more information: {reference}""" ) + + +def byte_unit_conversion(byte_size_str: str, output_unit: str = "B") -> float: + """Converts string representation of byte size to another unit and returns float output + + e.g. byte_unit_conversion("1 KB", "B") -> 1000.0 + e.g. byte_unit_conversion("1 KiB", "B") -> 1024.0 + """ + byte_size_str = byte_size_str.lower() + output_unit = output_unit.lower() + + units_multiplier = { + "b": 1, + "k": 1000, + "m": 1000**2, + "g": 1000**3, + "t": 1000**4, + "kb": 1000, + "mb": 1000**2, + "gb": 1000**3, + "tb": 1000**4, + "ki": 1024, + "mi": 1024**2, + "gi": 1024**3, + "ti": 1024**4, + "kib": 1024, + "mib": 1024**2, + "gib": 1024**3, + "tib": 1024**4, + } + + if output_unit not in units_multiplier: + raise ValueError( + f'Invalid input unit "{output_unit}". 
Valid units are {units_multiplier.keys()}' + ) + + str_pattern = r"\s*^(\d+(?:\.\d*){0,1})\s*([a-zA-Z]*)\s*$" + pattern = re.compile(str_pattern, re.IGNORECASE) + match = pattern.search(byte_size_str) + + if not match: + raise ValueError("Invalid byte size string") + value = float(match.group(1)) + input_unit = match.group(2) + if not input_unit: + input_unit = "b" + + if input_unit not in units_multiplier: + raise ValueError( + f'Invalid input unit "{input_unit}". Valid units are {list(units_multiplier.keys())}' + ) + + return value * units_multiplier[input_unit] / units_multiplier[output_unit] + + +class JsonDiffEnum(str, enum.Enum): + ADDED = "+" + REMOVED = "-" + MODIFIED = "!" + + +class JsonDiff: + def __init__(self, obj1: Dict[str, Any], obj2: Dict[str, Any]): + self.diff = self.json_diff(obj1, obj2) + + @staticmethod + def json_diff(obj1: Dict[str, Any], obj2: Dict[str, Any]) -> Dict[str, Any]: + """Calculates the diff between two json-like objects + + # Example usage + obj1 = {"a": 1, "b": {"c": 2, "d": 3}} + obj2 = {"a": 1, "b": {"c": 2, "e": 4}, "f": 5} + + result = json_diff(obj1, obj2) + """ + diff = {} + for key in set(obj1.keys()) | set(obj2.keys()): + if key not in obj1: + diff[key] = {JsonDiffEnum.ADDED: obj2[key]} + elif key not in obj2: + diff[key] = {JsonDiffEnum.REMOVED: obj1[key]} + elif obj1[key] != obj2[key]: + if isinstance(obj1[key], dict) and isinstance(obj2[key], dict): + nested_diff = JsonDiff.json_diff(obj1[key], obj2[key]) + if nested_diff: + diff[key] = nested_diff + else: + diff[key] = {JsonDiffEnum.MODIFIED: (obj1[key], obj2[key])} + return diff + + @staticmethod + def walk_dict(d, path, sentinel): + for key, value in d.items(): + if key is not sentinel: + if not isinstance(value, dict): + continue + yield from JsonDiff.walk_dict(value, path + [key], sentinel) + else: + yield path, value + + def modified(self): + """Generator that yields the path, old value, and new value of changed items""" + for path, (old, new) in 
self.walk_dict(self.diff, [], JsonDiffEnum.MODIFIED): + yield path, old, new + + def __repr__(self): + return f"{self.__class__.__name__}(diff={json.dumps(self.diff)})" diff --git a/src/nebari/plugins.py b/src/nebari/plugins.py index c5148e9e1d..71db0ade96 100644 --- a/src/nebari/plugins.py +++ b/src/nebari/plugins.py @@ -36,6 +36,8 @@ "_nebari.stages.kubernetes_keycloak_configuration", "_nebari.stages.kubernetes_services", "_nebari.stages.nebari_tf_extensions", + "_nebari.stages.kubernetes_kuberhealthy", + "_nebari.stages.kubernetes_kuberhealthy_healthchecks", ] @@ -128,7 +130,7 @@ def config_schema(self): classes = [schema.Main] + [ _.input_schema for _ in self.ordered_stages if _.input_schema is not None ] - return type("ConfigSchema", tuple(classes), {}) + return type("ConfigSchema", tuple(classes[::-1]), {}) nebari_plugin_manager = NebariPluginManager() diff --git a/src/nebari/schema.py b/src/nebari/schema.py index 70b9589e6f..6a809842d7 100644 --- a/src/nebari/schema.py +++ b/src/nebari/schema.py @@ -19,13 +19,15 @@ email_regex = "^[^ @]+@[^ @]+\\.[^ @]+$" email_pydantic = Annotated[str, StringConstraints(pattern=email_regex)] -github_url_regex = "^(https://)?github.com/([^/]+)/([^/]+)/?$" +github_url_regex = r"^(https://)?github\.com/([^/]+)/([^/]+)/?$" github_url_pydantic = Annotated[str, StringConstraints(pattern=github_url_regex)] class Base(pydantic.BaseModel): model_config = ConfigDict( - extra="forbid", validate_assignment=True, populate_by_name=True + extra="forbid", + validate_assignment=True, + populate_by_name=True, ) @@ -43,10 +45,24 @@ def to_yaml(cls, representer, node): return representer.represent_str(node.value) +class ExtraFieldSchema(Base): + model_config = ConfigDict( + extra="allow", + validate_assignment=True, + populate_by_name=True, + ) + immutable: bool = ( + False # Whether field supports being changed after initial deployment + ) + + class Main(Base): - project_name: project_name_pydantic + project_name: project_name_pydantic = 
Field(json_schema_extra={"immutable": True}) namespace: namespace_pydantic = "dev" - provider: ProviderEnum = ProviderEnum.local + provider: ProviderEnum = Field( + default=ProviderEnum.local, + json_schema_extra={"immutable": True}, + ) # In nebari_version only use major.minor.patch version - drop any pre/post/dev suffixes nebari_version: Annotated[str, Field(validate_default=True)] = __version__ diff --git a/tests/common/conda_store_utils.py b/tests/common/conda_store_utils.py new file mode 100644 index 0000000000..c150b4f6f0 --- /dev/null +++ b/tests/common/conda_store_utils.py @@ -0,0 +1,41 @@ +import re + +import requests + +from tests.tests_deployment import constants + + +def get_conda_store_session(): + """Log into conda-store using the test account and get session""" + session = requests.Session() + r = session.get( + f"https://{constants.NEBARI_HOSTNAME}/conda-store/login/?next=", verify=False + ) + auth_url = re.search('action="([^"]+)"', r.content.decode("utf8")).group(1) + response = session.post( + auth_url.replace("&amp;", "&"), + headers={"Content-Type": "application/x-www-form-urlencoded"}, + data={ + "username": constants.KEYCLOAK_USERNAME, + "password": constants.KEYCLOAK_PASSWORD, + "credentialId": "", + }, + verify=False, + ) + assert response.status_code == 200 + return session + + +def get_conda_store_user_permissions(): + """Log into conda-store using the test account and get session and using the token in + session call conda-store API to get permissions. 
+ """ + session = get_conda_store_session() + token = session.cookies.get("conda-store-auth") + response = requests.get( + f"https://{constants.NEBARI_HOSTNAME}/conda-store/api/v1/permission/", + headers={"Authorization": f"Bearer {token}"}, + verify=False, + ) + assert response.status_code == 200 + return response.json() diff --git a/tests/common/handlers.py b/tests/common/handlers.py new file mode 100644 index 0000000000..51964d3ac5 --- /dev/null +++ b/tests/common/handlers.py @@ -0,0 +1,342 @@ +import logging +import re +import time + +from playwright.sync_api import expect + +logger = logging.getLogger() + + +class JupyterLab: + def __init__(self, navigator): + logger.debug(">>> Starting notebook manager...") + self.nav = navigator + self.page = self.nav.page + + def reset_workspace(self): + """Reset the JupyterLab workspace.""" + logger.debug(">>> Resetting JupyterLab workspace") + + # Check for and handle kernel popup + logger.debug(">>> Checking for kernel popup") + if self._check_for_kernel_popup(): + self._handle_kernel_popup() + + # Shutdown all kernels + logger.debug(">>> Shutting down all kernels") + self._shutdown_all_kernels() + + # Navigate back to root folder and close all tabs + logger.debug(">>> Navigating to root folder and closing all tabs") + self._navigate_to_root_folder() + logger.debug(">>> Closing all tabs") + self._close_all_tabs() + + # Ensure theme and launcher screen + logger.debug(">>> Ensuring theme and launcher screen") + self._assert_theme_and_launcher() + + def set_environment(self, kernel): + """Set environment for a Jupyter notebook.""" + if not self._check_for_kernel_popup(): + self._trigger_kernel_change_popup() + + self._handle_kernel_popup(kernel) + self._wait_for_kernel_label(kernel) + + def write_file(self, filepath, content): + """Write a file to the Nebari instance filesystem.""" + logger.debug(f">>> Writing file to {filepath}") + self._open_terminal() + self._execute_terminal_commands( + [f"cat <{filepath}", content, 
"EOF", f"ls {filepath}"] + ) + time.sleep(2) + + def _check_for_kernel_popup(self): + """Check if the kernel popup is open.""" + logger.debug(">>> Checking for kernel popup") + self.page.wait_for_load_state() + time.sleep(3) + visible = self.page.get_by_text("Select KernelStart a new").is_visible() + logger.debug(f">>> Kernel popup visible: {visible}") + return visible + + def _handle_kernel_popup(self, kernel=None): + """Handle kernel popup by selecting the appropriate kernel or dismissing the popup.""" + if kernel: + self._select_kernel(kernel) + else: + self._dismiss_kernel_popup() + + def _dismiss_kernel_popup(self): + """Dismiss the kernel selection popup.""" + logger.debug(">>> Dismissing kernel popup") + no_kernel_button = self.page.get_by_role("dialog").get_by_role( + "button", name="No Kernel" + ) + if no_kernel_button.is_visible(): + no_kernel_button.click() + else: + try: + self.page.get_by_role("button", name="Cancel").click() + except Exception: + raise ValueError("Unable to escape kernel selection dialog.") + + def _shutdown_all_kernels(self): + """Shutdown all running kernels.""" + logger.debug(">>> Shutting down all kernels") + kernel_menu = self.page.get_by_role("menuitem", name="Kernel") + kernel_menu.click() + shut_down_all = self.page.get_by_role("menuitem", name="Shut Down All Kernels…") + logger.debug( + f">>> Shut down all kernels visible: {shut_down_all.is_visible()} enabled: {shut_down_all.is_enabled()}" + ) + if shut_down_all.is_visible() and shut_down_all.is_enabled(): + shut_down_all.click() + self.page.get_by_role("button", name="Shut Down All").click() + else: + logger.debug(">>> No kernels to shut down") + + def _navigate_to_root_folder(self): + """Navigate back to the root folder in JupyterLab.""" + logger.debug(">>> Navigating to root folder") + self.page.get_by_title(f"/home/{self.nav.username}", exact=True).locator( + "path" + ).click() + + def _close_all_tabs(self): + """Close all open tabs in JupyterLab.""" + logger.debug(">>> 
Closing all tabs") + self.page.get_by_text("File", exact=True).click() + self.page.get_by_role("menuitem", name="Close All Tabs", exact=True).click() + + if self.page.get_by_text("Save your work", exact=True).is_visible(): + self.page.get_by_role( + "button", name="Discard changes to file", exact=True + ).click() + + def _assert_theme_and_launcher(self): + """Ensure that the theme is set to JupyterLab Dark and Launcher screen is visible.""" + expect( + self.page.get_by_text( + "Set Preferred Dark Theme: JupyterLab Dark", exact=True + ) + ).to_be_hidden() + self.page.get_by_title("VS Code [↗]").wait_for(state="visible") + + def _open_terminal(self): + """Open a new terminal in JupyterLab.""" + self.page.get_by_text("File", exact=True).click() + self.page.get_by_text("New", exact=True).click() + self.page.get_by_role("menuitem", name="Terminal").get_by_text( + "Terminal" + ).click() + + def _execute_terminal_commands(self, commands): + """Execute a series of commands in the terminal.""" + for command in commands: + self.page.get_by_role("textbox", name="Terminal input").fill(command) + self.page.get_by_role("textbox", name="Terminal input").press("Enter") + time.sleep(0.5) + + +class Notebook(JupyterLab): + def __init__(self, navigator): + logger.debug(">>> Starting notebook manager...") + self.nav = navigator + self.page = self.nav.page + + def _open_notebook(self, notebook_name): + """Open a notebook in JupyterLab.""" + self.page.get_by_text("File", exact=True).click() + self.page.locator("#jp-mainmenu-file").get_by_text("Open from Path…").click() + + expect(self.page.get_by_text("Open PathPathCancelOpen")).to_be_visible() + + # Fill notebook name into the textbox and click Open + self.page.get_by_placeholder("/path/relative/to/jlab/root").fill(notebook_name) + self.page.get_by_role("button", name="Open").click() + if self.page.get_by_text("Could not find path:").is_visible(): + self.page.get_by_role("button", name="Dismiss").click() + raise ValueError(f"Notebook 
{notebook_name} not found") + + # make sure that this notebook is one currently selected + expect(self.page.get_by_role("tab", name=notebook_name)).to_be_visible() + + def _run_all_cells(self): + """Run all cells in a Jupyter notebook.""" + self.page.get_by_role("menuitem", name="Run").click() + run_all_cells = self.page.locator("#jp-mainmenu-run").get_by_text( + "Run All Cells", exact=True + ) + if run_all_cells.is_visible(): + run_all_cells.click() + else: + self.page.get_by_text("Restart the kernel and run").click() + # Check if restart popup is visible + restart_popup = self.page.get_by_text("Restart Kernel?") + if restart_popup.is_visible(): + restart_popup.click() + self.page.get_by_role("button", name="Confirm Kernel Restart").click() + + def _wait_for_commands_completion( + self, timeout: float, completion_wait_time: float + ): + """ + Wait for commands to finish running + + Parameters + ---------- + timeout: float + Time in seconds to wait for the expected output text to appear. + completion_wait_time: float + Time in seconds to wait between checking for expected output text. 
+ """ + elapsed_time = 0.0 + still_visible = True + start_time = time.time() + while elapsed_time < timeout: + running = self.nav.page.get_by_text("[*]").all() + still_visible = any(list(map(lambda r: r.is_visible(), running))) + if not still_visible: + break + elapsed_time = time.time() - start_time + time.sleep(completion_wait_time) + if still_visible: + raise ValueError( + f"Timeout Waited for commands to finish, " + f"but couldn't finish in {timeout} sec" + ) + + def _get_outputs(self): + output_elements = self.nav.page.query_selector_all(".jp-OutputArea-output") + text_content = [element.text_content().strip() for element in output_elements] + return text_content + + def run_notebook(self, notebook_name, kernel): + """Run a notebook in JupyterLab.""" + # Open the notebook + logger.debug(f">>> Opening notebook: {notebook_name}") + self._open_notebook(notebook_name) + + # Set environment + logger.debug(f">>> Setting environment for kernel: {kernel}") + self.set_environment(kernel=kernel) + + # Run all cells + logger.debug(">>> Running all cells") + self._run_all_cells() + + # Wait for commands to finish running + logger.debug(">>> Waiting for commands to finish running") + self._wait_for_commands_completion(timeout=300, completion_wait_time=5) + + # Get the outputs + logger.debug(">>> Gathering outputs") + outputs = self._get_outputs() + + return outputs + + def _trigger_kernel_change_popup(self): + """Trigger the kernel change popup. 
(expects a notebook to be open)""" + self.page.get_by_role("menuitem", name="Kernel").click() + kernel_menu = self.page.get_by_role("menuitem", name="Change Kernel…") + if kernel_menu.is_visible(): + kernel_menu.click() + self.page.get_by_text("Select KernelStart a new").wait_for(state="visible") + logger.debug(">>> Kernel popup is visible") + else: + pass + + def _select_kernel(self, kernel): + """Select a kernel from the popup.""" + logger.debug(f">>> Selecting kernel: {kernel}") + + self.page.get_by_role("dialog").get_by_label("", exact=True).fill(kernel) + + # List of potential selectors + selectors = [ + self.page.get_by_role("cell", name=re.compile(kernel, re.IGNORECASE)).nth( + 1 + ), + self.page.get_by_role("cell", name=re.compile(kernel, re.IGNORECASE)).first, + self.page.get_by_text(kernel, exact=True).nth(1), + ] + + # Try each selector until one is visible and clickable + # this is done due to the different ways the kernel can be displayed + # as part of the new extension + for selector in selectors: + if selector.is_visible(): + selector.click() + logger.debug(f">>> Kernel {kernel} selected") + return + + # If none of the selectors match, dismiss the popup and raise an error + self._dismiss_kernel_popup() + raise ValueError(f"Kernel {kernel} not found in the list of kernels") + + def _wait_for_kernel_label(self, kernel): + """Wait for the kernel label to be visible.""" + kernel_label_loc = self.page.get_by_role("button", name=kernel) + if not kernel_label_loc.is_visible(): + kernel_label_loc.wait_for(state="attached") + logger.debug(f">>> Kernel label {kernel} is now visible") + + +class CondaStore(JupyterLab): + def __init__(self, navigator): + self.page = navigator.page + self.nav = navigator + + def _open_conda_store_service(self): + self.page.get_by_text("Services", exact=True).click() + self.page.get_by_text("Environment Management").click() + expect(self.page.get_by_role("tab", name="conda-store")).to_be_visible() + time.sleep(2) + + def 
_open_new_environment_tab(self): + self.page.get_by_label("Create a new environment in").click() + expect(self.page.get_by_text("Create Environment")).to_be_visible() + + def _assert_user_namespace(self): + expect( + self.page.get_by_role("button", name=f"{self.nav.username} Create a new") + ).to_be_visible() + + def _get_shown_namespaces(self): + _envs = self.page.locator("#environmentsScroll").get_by_role("button") + _env_contents = [env.text_content() for env in _envs.all()] + # Remove the "New" entry from each namespace "button" text + return [ + namespace.replace(" New", "") + for namespace in _env_contents + if namespace != " New" + ] + + def _assert_logged_in(self): + login_button = self.page.get_by_role("button", name="Log in") + if login_button.is_visible(): + login_button.click() + # wait for page to reload + self.page.wait_for_load_state() + time.sleep(2) + # A reload is required as conda-store "created" a new page once logged in + self.page.reload() + self.page.wait_for_load_state() + self._open_conda_store_service() + else: + # In this case logout should already be visible + expect(self.page.get_by_role("button", name="Logout")).to_be_visible() + self._assert_user_namespace() + + def conda_store_ui(self): + logger.debug(">>> Opening Conda Store UI") + self._open_conda_store_service() + + logger.debug(">>> Assert user is logged in") + self._assert_logged_in() + + logger.debug(">>> Opening new environment tab") + self._open_new_environment_tab() diff --git a/tests/common/navigator.py b/tests/common/navigator.py index f846d9a545..04e019a7a6 100644 --- a/tests/common/navigator.py +++ b/tests/common/navigator.py @@ -1,125 +1,48 @@ -import contextlib -import datetime as dt import logging import re -import time import urllib +from abc import ABC +from pathlib import Path from playwright.sync_api import expect, sync_playwright logger = logging.getLogger() -class Navigator: - """Base class for Nebari Playwright testing. 
This provides setup and - teardown methods that all tests will need and some other generally useful - methods such as clearing the workspace. Specific tests such has "Run a - notebook" are included in separate classes which use an instance of - this class. - - The Navigator class and the associated test classes are design to be able - to run either standalone, or inside of pytest. This makes it easy to - develop new tests, but also have them fully prepared to be - included as part of the test suite. - - Parameters - ---------- - nebari_url: str - Nebari URL to access for testing, e.g. "https://{nebari_url} - username: str - Login username for Nebari. For Google login, this will be email address. - password: str - Login password for Nebari. For Google login, this will be the Google - password. - auth: str - Authentication type of this Nebari instance. Options are "google" and - "password". - headless: bool - (Optional) Run the tests in headless mode (without visuals). Defaults - to False. - slow_mo: int - (Optional) Additional milliseconds to add to each Playwright command, - creating the effect of running the tests in slow motion so they are - easier for humans to follow. Defaults to 0. - browser: str - (Optional) Browser on which to run tests. Options are "chromium", - "webkit", and "firefox". Defaults to "chromium". - instance_name: str - (Optional) Server instance type on which to run tests. Options are - based on the configuration of the Nebari instance. Defaults to - "small-instance". Note that special characters (such as parenthesis) - will need to be converted to dashes. Check the HTML element to get the - exact structure. - video_dir: None or str - (Optional) Directory in which to save videos. If None, no video will - be saved. Defaults to None. +class NavigatorMixin(ABC): + """ + A mixin class providing common setup and teardown functionalities for Playwright navigators. 
""" def __init__( self, - nebari_url, - username, - password, - auth, headless=False, slow_mo=0, browser="chromium", - instance_name="small-instance", video_dir=None, + video_name_prefix=None, ): - self.nebari_url = nebari_url - self.username = username - self.password = password - self.auth = auth - self.initialized = False self.headless = headless self.slow_mo = slow_mo - self.browser = browser - self.instance_name = instance_name + self.browser_name = browser self.video_dir = video_dir + self.video_name_prefix = video_name_prefix + self.initialized = False + self.setup() - self.setup( - browser=self.browser, - headless=self.headless, - slow_mo=self.slow_mo, - ) - self.wait_for_server_spinup = 300_000 # 5 * 60 * 1_000 # 5 minutes in ms - - @property - def initialize(self): - """Ensure that the Navigator is setup and ready for testing.""" - if not self.initialized: - self.setup( - browser=self.browser, - headless=self.headless, - slow_mo=self.slow_mo, - ) - - def setup(self, browser, headless, slow_mo): - """Initial setup for running playwright. Starts playwright, creates - the browser object, a new browser context, and a new page object. - - Parameters - ---------- - browser: str - Browser on which to run tests. Options are "chromium", - "webkit", and "firefox". - headless: bool - Run the tests in headless mode (without visuals) if True - slow_mo: int - Additional milliseconds to add to each Playwright command, - creating the effect of running the tests in slow motion so they are - easier for humans to follow. 
- """ + def setup(self): + """Setup Playwright browser and context.""" logger.debug(">>> Setting up browser for Playwright") - self.playwright = sync_playwright().start() try: - self.browser = getattr(self.playwright, browser).launch( - headless=headless, slow_mo=slow_mo + self.browser = getattr(self.playwright, self.browser_name).launch( + headless=self.headless, slow_mo=self.slow_mo ) except AttributeError: - raise RuntimeError(f"{browser} browser is not recognized.") from None + raise RuntimeError( + f"{self.browser_name} browser is not recognized." + ) from None + self.context = self.browser.new_context( ignore_https_errors=True, record_video_dir=self.video_dir, @@ -127,26 +50,65 @@ def setup(self, browser, headless, slow_mo): self.page = self.context.new_page() self.initialized = True + def _rename_test_video_path(self, video_path): + """Rename the test video file to the test unique identifier.""" + video_file_name = ( + f"{self.video_name_prefix}.mp4" if self.video_name_prefix else None + ) + if video_file_name and video_path: + Path.rename(video_path, Path(self.video_dir) / video_file_name) + def teardown(self) -> None: - """Shut down and close playwright. This is important to ensure that - no leftover processes are left running in the background.""" - self.context.close() - self.browser.close() # Make sure to close, so that videos are saved. 
- self.playwright.stop() - logger.debug(">>> Teardown complete.") - - def login(self) -> None: - """Login to nebari deployment using the auth method on the class.""" + """Teardown Playwright browser and context.""" + if self.initialized: + # Rename the video file to the test unique identifier + current_video_path = self.page.video.path() + self._rename_test_video_path(current_video_path) + + self.context.close() + self.browser.close() + self.playwright.stop() + logger.debug(">>> Teardown complete.") + self.initialized = False + + def __enter__(self): + """Enter the runtime context related to this object.""" + return self + + def __exit__(self, exc_type, exc_value, traceback): + """Exit the runtime context related to this object.""" + self.teardown() + + +class LoginNavigator(NavigatorMixin): + """ + A navigator class to handle login operations for Nebari. + """ + + def __init__(self, nebari_url, username, password, auth="password", **kwargs): + super().__init__(**kwargs) + self.nebari_url = nebari_url + self.username = username + self.password = password + self.auth = auth + + def login(self): + """Login to Nebari deployment using the provided authentication method.""" + login_methods = { + "google": self._login_google, + "password": self._login_password, + } try: - return { - "google": self.login_google, - "password": self.login_password, - }[self.auth]() + login_methods[self.auth]() except KeyError: - raise ValueError(f"Auth type of {self.auth} is invalid.") from None + raise ValueError(f"Auth type {self.auth} is invalid.") - def login_google(self) -> None: - """Go to a nebari deployment, login via Google""" + def logout(self): + """Logout from Nebari deployment.""" + self.page.get_by_role("button", name="Logout").click() + self.page.wait_for_load_state + + def _login_google(self): logger.debug(">>> Sign in via Google and start the server") self.page.goto(self.nebari_url) expect(self.page).to_have_url(re.compile(f"{self.nebari_url}*")) @@ -156,278 +118,81 @@ def 
login_google(self) -> None: self.page.get_by_role("textbox", name="Email or phone").fill(self.username) self.page.get_by_role("button", name="Next").click() self.page.get_by_role("textbox", name="Enter your password").fill(self.password) - - self.page.wait_for_load_state("networkidle") self.page.get_by_role("button", name="Next").click() - - # let the page load self.page.wait_for_load_state("networkidle") - def login_password(self) -> None: - """Go to a nebari deployment, login via Username/Password, and start - a new server. - """ + def _login_password(self): logger.debug(">>> Sign in via Username/Password") self.page.goto(self.nebari_url) expect(self.page).to_have_url(re.compile(f"{self.nebari_url}*")) self.page.get_by_role("button", name="Sign in with Keycloak").click() self.page.get_by_label("Username").fill(self.username) - self.page.get_by_label("Password").click() self.page.get_by_label("Password").fill(self.password) self.page.get_by_role("button", name="Sign In").click() + self.page.wait_for_load_state() - # let the page load - self.page.wait_for_load_state("networkidle") + # Redirect to hub control panel + self.page.goto(urllib.parse.urljoin(self.nebari_url, "hub/home")) + expect(self.page.get_by_role("button", name="Logout")).to_be_visible() + + +class ServerManager(LoginNavigator): + """ + Manages server operations such as starting and stopping a Nebari server. + """ + + def __init__( + self, instance_name="small-instance", wait_for_server_spinup=300_000, **kwargs + ): + super().__init__(**kwargs) + self.instance_name = instance_name + self.wait_for_server_spinup = wait_for_server_spinup + + def start_server(self): + """Start a Nebari server, handling different UI states.""" + self.login() - def start_server(self) -> None: - """Start a nebari server. There are several different web interfaces - possible in this process depending on if you already have a server - running or not. 
In order for this to work, wait for the page to load, - we look for html elements that exist when no server is running, if - they aren't visible, we check for an existing server start option. - """ - # wait for the page to load logout_button = self.page.get_by_text("Logout", exact=True) logout_button.wait_for(state="attached", timeout=90000) - # if the server is already running start_locator = self.page.get_by_role("button", name="My Server", exact=True) if start_locator.is_visible(): start_locator.click() - - # if server is not yet running - start_locator = self.page.get_by_role("button", name="Start My Server") - if start_locator.is_visible(): - start_locator.click() + else: + start_locator = self.page.get_by_role("button", name="Start My Server") + if start_locator.is_visible(): + start_locator.click() server_options = self.page.get_by_role("heading", name="Server Options") if server_options.is_visible(): - # select instance type (this will fail if this instance type is not - # available) self.page.locator(f"#profile-item-{self.instance_name}").click() self.page.get_by_role("button", name="Start").click() - # wait for server spinup - self.page.wait_for_url( - urllib.parse.urljoin(self.nebari_url, f"user/{self.username}/*"), - wait_until="networkidle", - timeout=180000, - ) - - # the jupyter page loads independent of network activity so here - # we wait for the File menu to be available on the page, a proxy for - # the jupyterlab page being loaded. + self.page.wait_for_url(re.compile(f".*user/{self.username}/.*"), timeout=180000) file_locator = self.page.get_by_text("File", exact=True) - file_locator.wait_for( - timeout=self.wait_for_server_spinup, - state="attached", - ) + file_locator.wait_for(state="attached", timeout=self.wait_for_server_spinup) - logger.debug(">>> Sign in complete.") + logger.debug(">>> Profile Spawn complete.") - def _check_for_kernel_popup(self): - """Is the kernel popup currently open? 
- - Returns - ------- - True if the kernel popup is open. - """ - self.page.wait_for_load_state("networkidle") - time.sleep(3) - visible = self.page.get_by_text("Select Kernel", exact=True).is_visible() - return visible - - def reset_workspace(self): - """Reset the Jupyterlab workspace. - - * Closes all Tabs & handle possible popups for saving changes, - * make sure any kernel popups are dealt with - * reset file browser is reset to root - * Finally, ensure that the Launcher screen is showing - """ - logger.info(">>> Reset JupyterLab workspace") - - # server is already running and there is no popup - popup = self._check_for_kernel_popup() - - # server is on running and there is a popup - if popup: - self._set_environment_via_popup(kernel=None) - - # go to Kernel menu - kernel_menuitem = self.page.get_by_role("menuitem", name="Kernel", exact=True) - kernel_menuitem.click() - # shut down multiple running kernels - with contextlib.suppress(Exception): - shut_down_all = self.page.get_by_text( - "Shut Down All Kernels...", exact=True - ) - shut_down_all.wait_for(timeout=300, state="attached") - shut_down_all.click() - - # shut down kernel if only one notebook is running - kernel_menuitem.click() - with contextlib.suppress(Exception): - shut_down_current = self.page.get_by_text("Shut Down Kernel", exact=True) - shut_down_current.wait_for(timeout=300, state="attached") - shut_down_current.click() - - # go back to root folder - self.page.get_by_title(f"/home/{self.username}", exact=True).locator( - "path" - ).click() - - # go to File menu + def stop_server(self): + """Stops the Nebari server via the Hub Control Panel.""" self.page.get_by_text("File", exact=True).click() - # close all tabs - self.page.get_by_role("menuitem", name="Close All Tabs", exact=True).click() - - # there may be a popup to save your work, don't save - if self.page.get_by_text("Save your work", exact=True).is_visible(): - self.page.get_by_role("button", name="Discard", exact=True).click() - - # wait 
to ensure that the Launcher is showing - self.page.get_by_text("VS Code [↗]", exact=True).wait_for( - timeout=3000, state="attached" - ) - - def _set_environment_via_popup(self, kernel=None): - """Set the environment kernel on a jupyter notebook via the popup - dialog box. If kernel is `None`, `No Kernel` is selected and the - popup is dismissed. - - Attributes - ---------- - kernel: str or None - (Optional) name of conda environment to set. Defaults to None. - - """ - if kernel is None: - # close dialog (deal with the two formats of this dialog) - try: - cancel_button = self.page.get_by_text("Cancel", exact=True) - if cancel_button.is_visible(): - cancel_button.click() - else: - self.page.mouse.click(0, 0) - except Exception: - self.page.locator("div").filter(has_text="No KernelSelect").get_by_role( - "button", name="No Kernel" - ).wait_for(timeout=300, state="attached") - else: - # set the environment - # failure here indicates that the environment doesn't exist either - # because of incorrect naming syntax or because the env is still - # being built - - new_launcher_popup = self.page.locator( - ".jp-KernelSelector-Dialog .jp-NewLauncher-table table" - ).nth(0) - if new_launcher_popup.is_visible(): - # for when the jupyterlab-new-launcher extension is installed - new_launcher_popup.locator("td").nth(0).click() - else: - # for when only the native launcher is available - self.page.get_by_role("combobox").nth(1).select_option(kernel) - # click Select to close popup (deal with the two formats of this dialog) - try: - self.page.get_by_role("button", name="Select Kernel").click() - except Exception: - self.page.locator("div").filter( - has_text="No KernelSelect" - ).get_by_role("button", name="Select Kernel").click() - - def set_environment(self, kernel): - """Set environment of a jupyter notebook. - - IMPORTANT: The focus MUST be on the notebook on which you want to set - the environment. - - Conda environments may still be being built shortly after deployment. 
- - Parameters - ---------- - kernel: str - Name of kernel to set. - - Returns - ------- - None - """ - - popup = self._check_for_kernel_popup() - # if there is not a kernel popup, make it appear - if not popup: - self.page.get_by_role("menuitem", name="Kernel", exact=True).click() - self.page.get_by_role("menuitem", name="Change Kernel…").get_by_text( - "Change Kernel…" - ).click() - - self._set_environment_via_popup(kernel) - - # wait for the jupyter UI to catch up before moving forward - # see if the jupyter notebook label for the conda env is visible - kernel_label_loc = self.page.get_by_role("button", name=kernel) - if not kernel_label_loc.is_visible(): - kernel_label_loc.wait_for(state="attached") - - def open_terminal(self): - """Open Terminal in the Nebari Jupyter Lab""" - self.page.get_by_text("File", exact=True).click() - self.page.get_by_text("New", exact=True).click() - self.page.get_by_role("menuitem", name="Terminal").get_by_text( - "Terminal" - ).click() - - def run_terminal_command(self, command): - """Run a command on the terminal in the Nebari Jupyter Lab - - Parameters - ---------- - command: str - command to run in the terminal - """ - self.page.get_by_role("textbox", name="Terminal input").fill(command) - self.page.get_by_role("textbox", name="Terminal input").press("Enter") - - def write_file(self, filepath, content): - """Write a file to Nebari instance filesystem - - The terminal is a blackbox for the browser. We can't access any of the - displayed text, therefore we have no way of knowing if the commands - are done executing. For this reason, there is an unavoidable sleep - here that prevents playwright from moving on to ensure that the focus - remains on the Terminal until we are done issuing our commands. - - Parameters - ---------- - filepath: str - path to write the file on the nebari file system - content: str - text to write to that file. 
- """ - start = dt.datetime.now() - logger.debug(f"Writing notebook to {filepath}") - self.open_terminal() - self.run_terminal_command(f"cat <{filepath}") - self.run_terminal_command(content) - self.run_terminal_command("EOF") - self.run_terminal_command(f"ls {filepath}") - logger.debug(f"time to complete {dt.datetime.now() - start}") - time.sleep(2) - - def stop_server(self) -> None: - """Stops the JupyterHub server by navigating to the Hub Control Panel.""" - self.page.get_by_text("File", exact=True).click() - with self.context.expect_page() as page_info: self.page.get_by_role("menuitem", name="Home", exact=True).click() home_page = page_info.value home_page.wait_for_load_state() stop_button = home_page.get_by_role("button", name="Stop My Server") - if not stop_button.is_visible(): - stop_button.wait_for(state="visible") + stop_button.wait_for(state="visible") stop_button.click() stop_button.wait_for(state="hidden") + + +# Factory method for creating different navigators if needed +def navigator_factory(navigator_type, **kwargs): + navigators = { + "login": LoginNavigator, + "server": ServerManager, + } + return navigators[navigator_type](**kwargs) diff --git a/tests/common/notebooks/test_notebook_output.ipynb b/tests/common/notebooks/test_notebook_output.ipynb index 47768a92ba..44daa80642 100644 --- a/tests/common/notebooks/test_notebook_output.ipynb +++ b/tests/common/notebooks/test_notebook_output.ipynb @@ -8,9 +8,7 @@ "tags": [] }, "outputs": [], - "source": [ - "import math" - ] + "source": [] }, { "cell_type": "code", diff --git a/tests/common/playwright_fixtures.py b/tests/common/playwright_fixtures.py index 03e17a5065..35ea36baad 100644 --- a/tests/common/playwright_fixtures.py +++ b/tests/common/playwright_fixtures.py @@ -5,76 +5,102 @@ import dotenv import pytest -from tests.common.navigator import Navigator +from tests.common.navigator import navigator_factory logger = logging.getLogger() -@pytest.fixture(scope="session") -def 
_navigator_session(request, browser_name, pytestconfig): - """Set up a navigator instance, login with username/password, start - a server. Teardown when session is complete. - Do not use this for individual tests, use `navigator` fixture - for tests.""" +def load_env_vars(): + """Load environment variables using dotenv and return necessary parameters.""" dotenv.load_dotenv() - # try/except added here in attempt to reach teardown after error in - # order to close the browser context which will save the video so I debug - # the error. - try: - nav = Navigator( - nebari_url=request.param.get("nebari_url") or os.environ["NEBARI_FULL_URL"], - username=request.param.get("keycloak_username") - or os.environ["KEYCLOAK_USERNAME"], - password=request.param.get("keycloak_password") - or os.environ["KEYCLOAK_PASSWORD"], - headless=not pytestconfig.getoption("--headed"), - slow_mo=pytestconfig.getoption("--slowmo"), - browser=browser_name, - auth="password", - instance_name=request.param.get( - "instance_name" - ), # small-instance included by default - video_dir="videos/", - ) - except Exception as e: - logger.debug(e) - raise - - try: - nav.login_password() - nav.start_server() - yield nav - except Exception as e: - logger.debug(e) - raise - finally: + return { + "nebari_url": os.getenv("NEBARI_FULL_URL"), + "username": os.getenv("KEYCLOAK_USERNAME"), + "password": os.getenv("KEYCLOAK_PASSWORD"), + } + + +def build_params(request, pytestconfig, extra_params=None): + """Construct and return parameters for navigator instances.""" + env_vars = load_env_vars() + params = { + "nebari_url": request.param.get("nebari_url") or env_vars["nebari_url"], + "username": request.param.get("keycloak_username") or env_vars["username"], + "password": request.param.get("keycloak_password") or env_vars["password"], + "auth": "password", + "video_dir": "videos/", + "headless": pytestconfig.getoption("--headed"), + "slow_mo": pytestconfig.getoption("--slowmo"), + } + if extra_params: + 
params.update(extra_params) + return params + + +def create_navigator(navigator_type, params): + """Create and return a navigator instance.""" + return navigator_factory(navigator_type, **params) + + +def pytest_sessionstart(session): + """Called before the start of the session. Clean up the videos directory.""" + _videos_path = Path("./videos") + if _videos_path.exists(): + for filename in os.listdir("./videos"): + filepath = _videos_path / filename + filepath.unlink() + + +# scope="function" will make sure that the fixture is created and destroyed for each test function. +@pytest.fixture(scope="function") +def navigator_session(request, pytestconfig): + session_type = request.param.get("session_type") + extra_params = request.param.get("extra_params", {}) + + # Get the test function name for video naming + test_name = request.node.originalname + video_name_prefix = f"video_{test_name}" + extra_params["video_name_prefix"] = video_name_prefix + + params = build_params(request, pytestconfig, extra_params) + + with create_navigator(session_type, params) as nav: + # Setup the navigator instance (e.g., login or start server) try: - nav.stop_server() + if session_type == "login": + nav.login() + elif session_type == "server": + nav.start_server() + yield nav except Exception as e: logger.debug(e) - nav.teardown() + raise + + +def parameterized_fixture(session_type, **extra_params): + """Utility function to create parameterized pytest fixtures.""" + return pytest.mark.parametrize( + "navigator_session", + [{"session_type": session_type, "extra_params": extra_params}], + indirect=True, + ) + + +def server_parameterized(instance_name=None, **kwargs): + return parameterized_fixture("server", instance_name=instance_name, **kwargs) + + +def login_parameterized(**kwargs): + return parameterized_fixture("login", **kwargs) @pytest.fixture(scope="function") -def navigator(_navigator_session): - """High level navigator instance with a reset workspace.""" - 
_navigator_session.reset_workspace() - yield _navigator_session +def navigator(navigator_session): + """High-level navigator instance. Can be overridden based on the available + parameterized decorator.""" + yield navigator_session @pytest.fixture(scope="session") def test_data_root(): - here = Path(__file__).parent - return here / "notebooks" - - -def navigator_parameterized( - nebari_url=None, keycloak_username=None, keycloak_password=None, instance_name=None -): - param = { - "instance_name": instance_name, - "nebari_url": nebari_url, - "keycloak_username": keycloak_username, - "keycloak_password": keycloak_password, - } - return pytest.mark.parametrize("_navigator_session", [param], indirect=True) + return Path(__file__).parent / "notebooks" diff --git a/tests/common/run_notebook.py b/tests/common/run_notebook.py deleted file mode 100644 index 019fd26710..0000000000 --- a/tests/common/run_notebook.py +++ /dev/null @@ -1,284 +0,0 @@ -import logging -import re -import time -from pathlib import Path -from typing import List, Union - -from tests.common.navigator import Navigator - -logger = logging.getLogger() - - -class Notebook: - def __init__(self, navigator: Navigator): - self.nav = navigator - self.nav.initialize - - def run( - self, - path, - expected_outputs: List[str], - conda_env: str, - timeout: float = 1000, - complition_wait_time: float = 2, - retry: int = 2, - retry_wait_time: float = 5, - exact_match: bool = True, - ): - """Run jupyter notebook and check for expected output text anywhere on - the page. - - Note: This will look for and exact match of expected_output_text - _anywhere_ on the page so be sure that your text is unique. - - Conda environments may still be being built shortly after deployment. - - Parameters - ---------- - path: str - Path to notebook relative to the root of the jupyterlab instance. - expected_outputs: List[str] - Text to look for in the output of the notebook. 
This can be a - substring of the actual output if exact_match is False. - conda_env: str - Name of conda environment. Python conda environments have the - structure "conda-env-nebari-git-nebari-git-dashboard-py" where - the actual name of the environment is "dashboard". - timeout: float - Time in seconds to wait for the expected output text to appear. - default: 1000 - complition_wait_time: float - Time in seconds to wait between checking for expected output text. - default: 2 - retry: int - Number of times to retry running the notebook. - default: 2 - retry_wait_time: float - Time in seconds to wait between retries. - default: 5 - exact_match: bool - If True, the expected output must match exactly. If False, the - expected output must be a substring of the actual output. - default: True - """ - logger.debug(f">>> Running notebook: {path}") - filename = Path(path).name - - # navigate to specific notebook - self.open_notebook(path) - # make sure the focus is on the dashboard tab we want to run - self.nav.page.get_by_role("tab", name=filename).get_by_text(filename).click() - self.nav.set_environment(kernel=conda_env) - - # make sure that this notebook is one currently selected - self.nav.page.get_by_role("tab", name=filename).get_by_text(filename).click() - - for _ in range(retry): - self._restart_run_all() - # Wait for a couple of seconds to make sure it's re-started - time.sleep(retry_wait_time) - self._wait_for_commands_completion(timeout, complition_wait_time) - all_outputs = self._get_outputs() - assert_match_all_outputs(expected_outputs, all_outputs, exact_match) - - def create_notebook(self, conda_env=None): - file_locator = self.nav.page.get_by_text("File", exact=True) - file_locator.wait_for( - timeout=self.nav.wait_for_server_spinup, - state="attached", - ) - file_locator.click() - submenu = self.nav.page.locator('[data-type="submenu"]').all() - submenu[0].click() - self.nav.page.get_by_role("menuitem", name="Notebook").get_by_text( - "Notebook", exact=True 
- ).click() - self.nav.page.wait_for_load_state("networkidle") - # make sure the focus is on the dashboard tab we want to run - # self.nav.page.get_by_role("tab", name=filename).get_by_text(filename).click() - self.nav.set_environment(kernel=conda_env) - - def open_notebook(self, path): - file_locator = self.nav.page.get_by_text("File", exact=True) - file_locator.wait_for( - timeout=self.nav.wait_for_server_spinup, - state="attached", - ) - file_locator.click() - self.nav.page.get_by_role("menuitem", name="Open from Path…").get_by_text( - "Open from Path…" - ).click() - self.nav.page.get_by_placeholder("/path/relative/to/jlab/root").fill(path) - self.nav.page.get_by_role("button", name="Open", exact=True).click() - # give the page a second to open, otherwise the options in the kernel - # menu will be disabled. - self.nav.page.wait_for_load_state("networkidle") - - if self.nav.page.get_by_text( - "Could not find path:", - exact=False, - ).is_visible(): - logger.debug("Path to notebook is invalid") - raise RuntimeError("Path to notebook is invalid") - - def assert_code_output( - self, - code: str, - expected_output: str, - timeout: float = 1000, - complition_wait_time: float = 2, - exact_match: bool = True, - ): - """ - Run code in last cell and check for expected output text anywhere on - the page. - - - Parameters - ---------- - code: str - Code to run in last cell. - expected_outputs: List[Union[re.Pattern, str]] - Text to look for in the output of the notebook. - timeout: float - Time in seconds to wait for the expected output text to appear. - default: 1000 - complition_wait_time: float - Time in seconds to wait between checking for expected output text. 
- """ - self.run_in_last_cell(code) - self._wait_for_commands_completion(timeout, complition_wait_time) - outputs = self._get_outputs() - actual_output = outputs[-1] if outputs else "" - assert_match_output(expected_output, actual_output, exact_match) - - def run_in_last_cell(self, code): - self._create_new_cell() - cell = self._get_last_cell() - cell.click() - cell.type(code) - # Wait for it to be ready to be executed - time.sleep(1) - cell.press("Shift+Enter") - # Wait for execution to start - time.sleep(0.5) - - def _create_new_cell(self): - new_cell_button = self.nav.page.query_selector( - 'button[data-command="notebook:insert-cell-below"]' - ) - new_cell_button.click() - - def _get_last_cell(self): - cells = self.nav.page.locator(".CodeMirror-code").all() - for cell in reversed(cells): - if cell.is_visible(): - return cell - raise ValueError("Unable to get last cell") - - def _wait_for_commands_completion( - self, timeout: float, complition_wait_time: float - ): - """ - Wait for commands to finish running - - Parameters - ---------- - timeout: float - Time in seconds to wait for the expected output text to appear. - complition_wait_time: float - Time in seconds to wait between checking for expected output text. 
- """ - elapsed_time = 0.0 - still_visible = True - start_time = time.time() - while elapsed_time < timeout: - running = self.nav.page.get_by_text("[*]").all() - still_visible = any(list(map(lambda r: r.is_visible(), running))) - if not still_visible: - break - elapsed_time = time.time() - start_time - time.sleep(complition_wait_time) - if still_visible: - raise ValueError( - f"Timeout Waited for commands to finish, " - f"but couldn't finish in {timeout} sec" - ) - - def _get_outputs(self) -> List[str]: - output_elements = self.nav.page.query_selector_all(".jp-OutputArea-output") - text_content = [element.text_content().strip() for element in output_elements] - return text_content - - def _restart_run_all(self): - # restart run all cells - self.nav.page.get_by_role("menuitem", name="Kernel", exact=True).click() - self.nav.page.get_by_role( - "menuitem", name="Restart Kernel and Run All Cells…" - ).get_by_text("Restart Kernel and Run All Cells…").click() - - # Restart dialog appears most, but not all of the time (e.g. set - # No Kernel, then Restart Run All) - restart_dialog_button = self.nav.page.get_by_role( - "button", name="Confirm Kernel Restart" - ) - if restart_dialog_button.is_visible(): - restart_dialog_button.click() - - -def assert_match_output( - expected_output: str, actual_output: str, exact_match: bool -) -> None: - """Assert that the expected_output is found in the actual_output. - - ---------- - Parameters - - expected_output: str - The expected output text or regular expression to find in the - actual output. - actual_output: str - The actual output text to search for the expected output. - exact_match: bool - If True, then the expected_output must match the actual_output - exactly. Otherwise, the expected_output must be found somewhere in - the actual_output. 
- """ - regex = re.compile(rf"{expected_output}") - match = ( - regex.fullmatch(actual_output) if exact_match else regex.search(actual_output) - ) - assert ( - match is not None - ), f"Expected output: {expected_output} not found in actual output: {actual_output}" - - -def assert_match_all_outputs( - expected_outputs: List[str], - actual_outputs: List[str], - exact_matches: Union[bool, List[bool]], -) -> None: - """Assert that the expected_outputs are found in the actual_outputs. - The expected_outputs and actual_outputs must be the same length. - - ---------- - Parameters - - expected_outputs: List[str] - A list of expected output text or regular expression to find in - the actual output. - actual_outputs: List[str] - A list of actual output text to search for the expected output. - exact_matches: Union[bool, List[bool]] - If True, then the expected_output must match the actual_output - exactly. Otherwise, the expected_output must be found somewhere in - the actual_output. If a list is provided, then it must be the same - length as expected_outputs and actual_outputs. 
- """ - if isinstance(exact_matches, bool): - exact_matches = [exact_matches] * len(expected_outputs) - - for exact_output, actual_output, exact in zip( - expected_outputs, actual_outputs, exact_matches - ): - assert_match_output(exact_output, actual_output, exact) diff --git a/tests/common/tests/test_notebook.py b/tests/common/tests/test_notebook.py deleted file mode 100644 index ba8cbbbf84..0000000000 --- a/tests/common/tests/test_notebook.py +++ /dev/null @@ -1,35 +0,0 @@ -import pytest - -from tests.common.run_notebook import assert_match_output - - -@pytest.mark.parametrize( - "expected, actual, exact", - [ - ("success: 6", "success: 6", True), - ("success", "success: 6", False), - ("6", "6", True), - ("cde", "abcde", False), - ("12.*5", "12345", True), - (".*5", "12345", True), - ("ab.*ef", "123abcdef123", False), - ], -) -def test_output_match(expected, actual, exact): - assert_match_output(expected, actual, exact_match=exact) - - -@pytest.mark.parametrize( - "expected, actual, exact", - [ - ("True", "False", True), - ("success: 6", "success", True), - ("60", "6", True), - ("abcde", "cde", True), - ("ab.*ef", "123abcdef123", True), - ], -) -def test_output_not_match(expected, actual, exact): - msg = f"Expected output: {expected} not found in actual output: {actual}" - with pytest.raises(AssertionError, match=msg): - assert_match_output(expected, actual, exact_match=exact) diff --git a/tests/tests_deployment/conftest.py b/tests/tests_deployment/conftest.py index fa71302823..acaf5ad249 100644 --- a/tests/tests_deployment/conftest.py +++ b/tests/tests_deployment/conftest.py @@ -1,6 +1,10 @@ import pytest from tests.tests_deployment.keycloak_utils import delete_client_keycloak_test_roles +from tests.tests_deployment.utils import ( + get_jupyterhub_token, + get_refresh_jupyterhub_token, +) @pytest.fixture() @@ -9,3 +13,33 @@ def cleanup_keycloak_roles(): yield # teardown delete_client_keycloak_test_roles(client_name="jupyterhub") + 
delete_client_keycloak_test_roles(client_name="conda_store") + + +@pytest.fixture(scope="session") +def jupyterhub_access_token(): + return get_jupyterhub_token(note="base-jupyterhub-token") + + +@pytest.fixture(scope="function") +def refresh_token_response(request, jupyterhub_access_token): + note = request.param # Get the parameter passed to the fixture + yield get_refresh_jupyterhub_token(jupyterhub_access_token, note) + + +def parameterized_fixture(new_note): + """Utility function to create parameterized pytest fixtures.""" + return pytest.mark.parametrize( + "refresh_token_response", + [new_note], + indirect=True, + ) + + +def token_parameterized(note): + return parameterized_fixture(note) + + +@pytest.fixture(scope="function") +def access_token_response(refresh_token_response): + yield refresh_token_response diff --git a/tests/tests_deployment/constants.py b/tests/tests_deployment/constants.py index cfeb6c26e6..519e862e6b 100644 --- a/tests/tests_deployment/constants.py +++ b/tests/tests_deployment/constants.py @@ -1,7 +1,11 @@ import os NEBARI_HOSTNAME = os.environ.get("NEBARI_HOSTNAME", "github-actions.nebari.dev") +NEBARI_CONFIG_PATH = os.environ.get("NEBARI_CONFIG_PATH", "nebari-config.yaml") GATEWAY_ENDPOINT = "gateway" -KEYCLOAK_USERNAME = os.environ["KEYCLOAK_USERNAME"] -KEYCLOAK_PASSWORD = os.environ["KEYCLOAK_PASSWORD"] +KEYCLOAK_USERNAME = os.environ.get("KEYCLOAK_USERNAME", "nebari") +KEYCLOAK_PASSWORD = os.environ.get("KEYCLOAK_PASSWORD", "nebari") + +PARAMIKO_SSH_ALLOW_AGENT = False +PARAMIKO_SSH_LOOK_FOR_KEYS = False diff --git a/tests/tests_deployment/keycloak_utils.py b/tests/tests_deployment/keycloak_utils.py index 6e6f6c21e6..96b302108f 100644 --- a/tests/tests_deployment/keycloak_utils.py +++ b/tests/tests_deployment/keycloak_utils.py @@ -1,9 +1,9 @@ -import os import pathlib from _nebari.config import read_configuration from _nebari.keycloak import get_keycloak_admin_from_config from nebari.plugins import nebari_plugin_manager +from 
tests.tests_deployment import constants def get_keycloak_client_details_by_name(client_name, keycloak_admin=None): @@ -32,7 +32,7 @@ def get_keycloak_role_details_by_name(roles, role_name): def get_keycloak_admin(): config_schema = nebari_plugin_manager.config_schema - config_filepath = os.environ.get("NEBARI_CONFIG_PATH", "nebari-config.yaml") + config_filepath = constants.NEBARI_CONFIG_PATH assert pathlib.Path(config_filepath).exists() config = read_configuration(config_filepath, config_schema) return get_keycloak_admin_from_config(config) @@ -81,6 +81,31 @@ def create_keycloak_role(client_name: str, role_name: str, scopes: str, componen ) +def get_keycloak_client_role(client_name, role_name): + keycloak_admin = get_keycloak_admin() + client_details = get_keycloak_client_details_by_name( + client_name=client_name, keycloak_admin=keycloak_admin + ) + return keycloak_admin.get_client_role( + client_id=client_details["id"], role_name=role_name + ) + + +def get_keycloak_client_roles(client_name): + keycloak_admin = get_keycloak_admin() + client_details = get_keycloak_client_details_by_name( + client_name=client_name, keycloak_admin=keycloak_admin + ) + return keycloak_admin.get_client_roles(client_id=client_details["id"]) + + +def get_keycloak_role_groups(client_id, role_name): + keycloak_admin = get_keycloak_admin() + return keycloak_admin.get_client_role_groups( + client_id=client_id, role_name=role_name + ) + + def delete_client_keycloak_test_roles(client_name): keycloak_admin = get_keycloak_admin() client_details = get_keycloak_client_details_by_name( diff --git a/tests/tests_deployment/test_conda_store_roles_loaded.py b/tests/tests_deployment/test_conda_store_roles_loaded.py new file mode 100644 index 0000000000..732b0b0154 --- /dev/null +++ b/tests/tests_deployment/test_conda_store_roles_loaded.py @@ -0,0 +1,66 @@ +import pytest + +from tests.common.conda_store_utils import get_conda_store_user_permissions +from tests.tests_deployment import constants +from 
tests.tests_deployment.keycloak_utils import ( + assign_keycloak_client_role_to_user, + create_keycloak_role, +) + + +@pytest.mark.parametrize( + "scopes,changed_scopes", + ( + [ + "admin!namespace=analyst,developer!namespace=nebari-git", + {"nebari-git/*": ["developer"], "analyst/*": ["admin"]}, + ], + [ + "admin!namespace=analyst,developer!namespace=invalid-namespace", + {"analyst/*": ["admin"]}, + ], + [ + # duplicate namespace role, chose highest permissions + "admin!namespace=analyst,developer!namespace=analyst", + {"analyst/*": ["admin"]}, + ], + ["invalid-role!namespace=analyst", {}], + ), +) +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +@pytest.mark.filterwarnings( + "ignore:.*auto_refresh_token is deprecated:DeprecationWarning" +) +@pytest.mark.filterwarnings("ignore::ResourceWarning") +def test_conda_store_roles_loaded_from_keycloak( + scopes: str, changed_scopes: dict, cleanup_keycloak_roles +): + + # Verify permissions/roles are different from what we're about to set + # So that this test is actually testing the change + permissions = get_conda_store_user_permissions() + entity_roles = permissions["data"]["entity_roles"] + for namespace, role in changed_scopes.items(): + assert entity_roles[namespace] != role + + role = create_keycloak_role( + client_name="conda_store", + # Note: we're clearing this role after every test case, and we're clearing + # it by name, so it must start with test- to be deleted afterwards + role_name="test-custom-role", + scopes=scopes, + component="conda-store", + ) + assert role + # assign created role to the user + assign_keycloak_client_role_to_user( + constants.KEYCLOAK_USERNAME, client_name="conda_store", role=role + ) + permissions = get_conda_store_user_permissions() + updated_entity_roles = permissions["data"]["entity_roles"] + + # Verify permissions/roles are set to expectation + assert updated_entity_roles == { + **entity_roles, + **changed_scopes, + } diff --git 
a/tests/tests_deployment/test_dask_gateway.py b/tests/tests_deployment/test_dask_gateway.py index 78b02de883..0a1ce0792c 100644 --- a/tests/tests_deployment/test_dask_gateway.py +++ b/tests/tests_deployment/test_dask_gateway.py @@ -4,9 +4,7 @@ import pytest from tests.tests_deployment import constants -from tests.tests_deployment.utils import get_jupyterhub_token, monkeypatch_ssl_context - -monkeypatch_ssl_context() +from tests.tests_deployment.utils import get_jupyterhub_token @pytest.fixture @@ -15,7 +13,17 @@ def dask_gateway_object(): os.environ["JUPYTERHUB_API_TOKEN"] = get_jupyterhub_token( "dask-gateway-pytest-token" ) - return dask_gateway.Gateway( + + # Create custom class from Gateway that disables the tls/ssl verification + # to do that we will override the self._request_kwargs dictionary within the + # __init__, targeting aiohttp.ClientSession.request method + + class DaskGateway(dask_gateway.Gateway): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self._request_kwargs.update({"ssl": False}) + + return DaskGateway( address=f"https://{constants.NEBARI_HOSTNAME}/{constants.GATEWAY_ENDPOINT}", auth="jupyterhub", proxy_address=f"tcp://{constants.NEBARI_HOSTNAME}:8786", diff --git a/tests/tests_deployment/test_jupyterhub_api.py b/tests/tests_deployment/test_jupyterhub_api.py index 5e1a54562b..aaeaf535ac 100644 --- a/tests/tests_deployment/test_jupyterhub_api.py +++ b/tests/tests_deployment/test_jupyterhub_api.py @@ -1,20 +1,24 @@ import pytest +import requests from tests.tests_deployment import constants +from tests.tests_deployment.conftest import token_parameterized from tests.tests_deployment.keycloak_utils import ( assign_keycloak_client_role_to_user, create_keycloak_role, + get_keycloak_client_details_by_name, + get_keycloak_client_role, + get_keycloak_client_roles, + get_keycloak_role_groups, ) -from tests.tests_deployment.utils import create_jupyterhub_token, get_jupyterhub_session +from tests.tests_deployment.utils 
import get_refresh_jupyterhub_token @pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") -def test_jupyterhub_loads_roles_from_keycloak(): - session = get_jupyterhub_session() - xsrf_token = session.cookies.get("_xsrf") - response = session.get( - f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}", - headers={"X-XSRFToken": xsrf_token}, +def test_jupyterhub_loads_roles_from_keycloak(jupyterhub_access_token): + response = requests.get( + url=f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}", + headers={"Authorization": f"Bearer {jupyterhub_access_token}"}, verify=False, ) user = response.json() @@ -30,9 +34,65 @@ def test_jupyterhub_loads_roles_from_keycloak(): "grafana_developer", "manage-account-links", "view-profile", + # default roles + "allow-read-access-to-services-role", + "allow-group-directory-creation-role", } +@token_parameterized(note="get-default-scopes") +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +def test_default_user_role_scopes(access_token_response): + token_scopes = set(access_token_response.json()["scopes"]) + assert "read:services" in token_scopes + + +@pytest.mark.filterwarnings( + "ignore:.*auto_refresh_token is deprecated:DeprecationWarning" +) +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +def test_check_default_roles_added_in_keycloak(): + client_roles = get_keycloak_client_roles(client_name="jupyterhub") + role_names = [role["name"] for role in client_roles] + assert "allow-app-sharing-role" in role_names + assert "allow-read-access-to-services-role" in role_names + assert "allow-group-directory-creation-role" in role_names + + +@pytest.mark.filterwarnings( + "ignore:.*auto_refresh_token is deprecated:DeprecationWarning" +) +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +def test_check_directory_creation_scope_attributes(): + client_role = 
get_keycloak_client_role( + client_name="jupyterhub", role_name="allow-group-directory-creation-role" + ) + assert client_role["attributes"]["component"][0] == "shared-directory" + assert client_role["attributes"]["scopes"][0] == "write:shared-mount" + + +@pytest.mark.filterwarnings( + "ignore:.*auto_refresh_token is deprecated:DeprecationWarning" +) +@pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") +def test_groups_with_mount_permissions(): + client_role = get_keycloak_client_role( + client_name="jupyterhub", role_name="allow-group-directory-creation-role" + ) + client_details = get_keycloak_client_details_by_name(client_name="jupyterhub") + role_groups = get_keycloak_role_groups( + client_id=client_details["id"], role_name=client_role["name"] + ) + assert set([group["path"] for group in role_groups]) == set( + [ + "/developer", + "/admin", + "/analyst", + ] + ) + + +@token_parameterized(note="before-role-creation-and-assignment") @pytest.mark.parametrize( "component,scopes,expected_scopes_difference", ( @@ -50,13 +110,14 @@ def test_jupyterhub_loads_roles_from_keycloak(): "ignore:.*auto_refresh_token is deprecated:DeprecationWarning" ) def test_keycloak_roles_attributes_parsed_as_jhub_scopes( - component, scopes, expected_scopes_difference, cleanup_keycloak_roles + component, + scopes, + expected_scopes_difference, + cleanup_keycloak_roles, + access_token_response, ): # check token scopes before role creation and assignment - token_response_before = create_jupyterhub_token( - note="before-role-creation-and-assignment" - ) - token_scopes_before = set(token_response_before.json()["scopes"]) + token_scopes_before = set(access_token_response.json()["scopes"]) # create keycloak role with jupyterhub scopes in attributes role = create_keycloak_role( client_name="jupyterhub", @@ -71,8 +132,9 @@ def test_keycloak_roles_attributes_parsed_as_jhub_scopes( assign_keycloak_client_role_to_user( constants.KEYCLOAK_USERNAME, 
client_name="jupyterhub", role=role ) - token_response_after = create_jupyterhub_token( - note="after-role-creation-and-assignment" + token_response_after = get_refresh_jupyterhub_token( + old_token=access_token_response.json()["token"], + note="after-role-creation-and-assignment", ) token_scopes_after = set(token_response_after.json()["scopes"]) # verify new scopes added/removed @@ -82,12 +144,10 @@ def test_keycloak_roles_attributes_parsed_as_jhub_scopes( @pytest.mark.filterwarnings("ignore::urllib3.exceptions.InsecureRequestWarning") -def test_jupyterhub_loads_groups_from_keycloak(): - session = get_jupyterhub_session() - xsrf_token = session.cookies.get("_xsrf") - response = session.get( +def test_jupyterhub_loads_groups_from_keycloak(jupyterhub_access_token): + response = requests.get( f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}", - headers={"X-XSRFToken": xsrf_token}, + headers={"Authorization": f"Bearer {jupyterhub_access_token}"}, verify=False, ) user = response.json() diff --git a/tests/tests_deployment/test_jupyterhub_ssh.py b/tests/tests_deployment/test_jupyterhub_ssh.py index fd6b0799d5..d65bd4800f 100644 --- a/tests/tests_deployment/test_jupyterhub_ssh.py +++ b/tests/tests_deployment/test_jupyterhub_ssh.py @@ -7,36 +7,33 @@ from _nebari.utils import escape_string from tests.tests_deployment import constants -from tests.tests_deployment.utils import get_jupyterhub_token, monkeypatch_ssl_context +from tests.tests_deployment.utils import monkeypatch_ssl_context monkeypatch_ssl_context() TIMEOUT_SECS = 300 -@pytest.fixture(scope="session") -def api_token(): - return get_jupyterhub_token("jupyterhub-ssh") - - @pytest.fixture(scope="function") -def paramiko_object(api_token): +def paramiko_object(jupyterhub_access_token): """Connects to JupyterHub ssh cluster from outside the cluster.""" - + params = { + "hostname": constants.NEBARI_HOSTNAME, + "port": 8022, + "username": constants.KEYCLOAK_USERNAME, + "password": 
jupyterhub_access_token, + "allow_agent": constants.PARAMIKO_SSH_ALLOW_AGENT, + "look_for_keys": constants.PARAMIKO_SSH_LOOK_FOR_KEYS, + "auth_timeout": 5 * 60, + } + + ssh_client = paramiko.SSHClient() + ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) try: - client = paramiko.SSHClient() - client.set_missing_host_key_policy(paramiko.AutoAddPolicy) - client.connect( - hostname=constants.NEBARI_HOSTNAME, - port=8022, - username=constants.KEYCLOAK_USERNAME, - password=api_token, - # wait 5 minutes for jupyterlab server/terminal to spin up - auth_timeout=5 * 60, - ) - yield client + ssh_client.connect(**params) + yield ssh_client finally: - client.close() + ssh_client.close() def run_command(command, stdin, stdout, stderr): @@ -125,6 +122,9 @@ def test_contains_jupyterhub_ssh(paramiko_object): ("cat ~/.bashrc", "Managed by Nebari"), ("cat ~/.profile", "Managed by Nebari"), ("cat ~/.bash_logout", "Managed by Nebari"), + # ensure we don't copy over extra files from /etc/skel in init container + ("ls -la ~/..202*", "No such file or directory"), + ("ls -la ~/..data", "No such file or directory"), ] for command, output in commands_contain: diff --git a/tests/tests_deployment/utils.py b/tests/tests_deployment/utils.py index b0965dd1ae..22d9d9fe01 100644 --- a/tests/tests_deployment/utils.py +++ b/tests/tests_deployment/utils.py @@ -2,48 +2,114 @@ import ssl import requests +import requests.cookies from tests.tests_deployment import constants def get_jupyterhub_session(): session = requests.Session() - r = session.get( - f"https://{constants.NEBARI_HOSTNAME}/hub/oauth_login", verify=False - ) - auth_url = re.search('action="([^"]+)"', r.content.decode("utf8")).group(1) - - r = session.post( - auth_url.replace("&", "&"), - headers={"Content-Type": "application/x-www-form-urlencoded"}, - data={ + session.cookies.clear() + + try: + response = session.get( + f"https://{constants.NEBARI_HOSTNAME}/hub/oauth_login", verify=False + ) + response.raise_for_status() + 
+ # Extract the authentication URL from the response + auth_url_match = re.search('action="([^"]+)"', response.content.decode("utf8")) + + if not auth_url_match: + raise ValueError("Authentication URL not found in response.") + + auth_url = auth_url_match.group(1).replace("&", "&") + + auth_data = { "username": constants.KEYCLOAK_USERNAME, "password": constants.KEYCLOAK_PASSWORD, "credentialId": "", - }, - verify=False, - ) + } + response = session.post( + auth_url, + headers={"Content-Type": "application/x-www-form-urlencoded"}, + data=auth_data, + verify=False, + ) + response.raise_for_status() + + except requests.RequestException as e: + raise ValueError(f"An error occurred during authentication: {e}") + return session def create_jupyterhub_token(note): session = get_jupyterhub_session() - xsrf_token = session.cookies.get("_xsrf") - headers = {"Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token"} - if xsrf_token: - headers["X-XSRFToken"] = xsrf_token + + try: + # Retrieve the XSRF token from session cookies + xsrf_token = session.cookies.get("_xsrf") + except requests.cookies.CookieConflictError: + xsrf_token = session.cookies.get("_xsrf", path="/hub/") + + if not xsrf_token: + raise ValueError("XSRF token not found in session cookies.") + + headers = { + "Referer": f"https://{constants.NEBARI_HOSTNAME}/hub/token", + "X-XSRFToken": xsrf_token, + } + + url = f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}/tokens" + payload = {"note": note, "expires_in": None} + + try: + response = session.post(url, headers=headers, json=payload, verify=False) + if response.status_code == 403: + # Retry with refreshed XSRF token if initial attempt is forbidden + xsrf_token = response.cookies.get("_xsrf") + headers["X-XSRFToken"] = xsrf_token + response = session.post(url, headers=headers, json=payload, verify=False) + response.raise_for_status() + except requests.RequestException as e: + raise ValueError(f"Failed to create JupyterHub token: 
{e}") + + return response + + +def get_refresh_jupyterhub_token(old_token, note): + headers = { + "Content-Type": "application/json", + "Authorization": f"Bearer {old_token}", + } + data = {"note": note, "expires_in": None} - return session.post( - f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}/tokens", - headers=headers, - json=data, - verify=False, - ) + + try: + response = requests.post( + f"https://{constants.NEBARI_HOSTNAME}/hub/api/users/{constants.KEYCLOAK_USERNAME}/tokens", + headers=headers, + json=data, + verify=False, + ) + response.raise_for_status() # Ensure the request was successful + + except requests.exceptions.RequestException as e: + raise ValueError(f"An error occurred while creating the token: {e}") + + return response def get_jupyterhub_token(note="jupyterhub-tests-deployment"): response = create_jupyterhub_token(note=note) - return response.json()["token"] + try: + token = response.json()["token"] + except (KeyError, ValueError) as e: + print(f"An error occurred while retrieving the token: {e}") + raise + + return token def monkeypatch_ssl_context(): diff --git a/tests/tests_e2e/cypress.json b/tests/tests_e2e/cypress.json deleted file mode 100644 index 2c63c08510..0000000000 --- a/tests/tests_e2e/cypress.json +++ /dev/null @@ -1,2 +0,0 @@ -{ -} diff --git a/tests/tests_e2e/cypress/.gitignore b/tests/tests_e2e/cypress/.gitignore deleted file mode 100644 index c5cfa0cf4e..0000000000 --- a/tests/tests_e2e/cypress/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -cypress/videos/ -cypress/screenshots/ -fixtures/ diff --git a/tests/tests_e2e/cypress/integration/main.js b/tests/tests_e2e/cypress/integration/main.js deleted file mode 100644 index e25d60fd8d..0000000000 --- a/tests/tests_e2e/cypress/integration/main.js +++ /dev/null @@ -1,103 +0,0 @@ -const { divide } = require("lodash"); - -const security_authentication_type = Cypress.env('nebari_security_authentication_type'); - -const EXAMPLE_USER_NAME = 
Cypress.env('EXAMPLE_USER_NAME') || 'example-user'; - -const EXAMPLE_USER_PASSWORD = Cypress.env('EXAMPLE_USER_PASSWORD'); - - -describe('First Test', () => { - - if (security_authentication_type == 'Auth0') { - - it('Check Auth0 login page is running', () => { - - cy.visit('/hub/home'); - - cy.get('#login-main > div.service-login > a') - .should('contain', 'Sign in with Keycloak').click(); - - cy.get('a#social-auth0') - .should('contain', 'auth0'); - - }) - - } else if (security_authentication_type == 'GitHub') { - - it('Check GitHub login page is running', () => { - - cy.visit('/hub/home'); - - cy.get('#login-main > div.service-login > a') - .should('contain', 'Sign in with Keycloak').click(); - - cy.get('a#social-github') - .should('contain', 'github'); - - - }) - - } else if (security_authentication_type == 'password') { - - it('Check Nebari login and start JupyterLab', () => { - - cy.loginWithPassword(EXAMPLE_USER_NAME, EXAMPLE_USER_PASSWORD); - - // Start my Jupyter server - - cy.get('#start') - .should('contain', 'My Server').click(); - - cy.get('h1') - .should('contain', 'Server Options'); - - cy.get('button.btn.btn-jupyter') - .should('contain', 'Start').click(); - - // Minimal check that JupyterLab has opened - cy.get('div#jp-MainLogo', { timeout: 60000 }).should('exist').wait(4000); - - // Click VS Code Launcher exists - cy.get('div.jp-LauncherCard[title="VS Code [↗]"]').should('exist'); - - // Should reflect theme set by default_settings - cy.get('body[data-jp-theme-name="JupyterLab Dark"]').should('exist'); - - // Stop my Jupyter server - must do this so PVC can be destroyed on Minikube - cy.visit('/hub/home'); - - // wait because otherwise event handler is not yet registered - // 'Correct' solution is here: https://www.cypress.io/blog/2019/01/22/when-can-the-test-click/ - cy.get('#stop') - .should('contain', 'Stop My Server').wait(1000).click(); - - cy.get('#start', { timeout: 40000 }) - .should('contain', 'Start My Server'); - - // Visit Conda-Store 
- - cy.visit('/conda-store/login/'); - - cy.get('body > nav > a') - .contains('conda-store') - .should('have.attr', 'href'); - - // Visit Grafana Monitoring - user must have an email address in Keycloak - - cy.visit('/monitoring/dashboards'); - - cy.get('div.page-header h1', { timeout: 20000 }).should('contain', 'Dashboards'); - - // Visit Keycloak User Profile - - cy.visit('/auth/realms/nebari/account/#/personal-info'); - - cy.get('input#user-name', { timeout: 20000 }).should('have.value', EXAMPLE_USER_NAME); - }) - - } else { - throw new Error("No security_authentication_type env var is set"); - } - -}) diff --git a/tests/tests_e2e/cypress/notebooks/BasicTest.ipynb b/tests/tests_e2e/cypress/notebooks/BasicTest.ipynb deleted file mode 100644 index 2f334735fb..0000000000 --- a/tests/tests_e2e/cypress/notebooks/BasicTest.ipynb +++ /dev/null @@ -1,59 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "30609efe-beee-485e-993f-62c78d31f3c1", - "metadata": {}, - "outputs": [ - { - "output_type": "stream", - "name": "stdout", - "text": [ - "test\n" - ] - } - ], - "source": [ - "print('test')\n", - "\n", - "from dask_gateway import Gateway\n", - "gateway = Gateway()\n", - "options = gateway.cluster_options()\n", - "cluster = gateway.new_cluster(options)\n", - "cluster" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "34711394-bd9d-4dc4-842c-740a2a538686", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "name": "python3", - "display_name": "Python 3.9.5 64-bit ('qhub': virtualenvwrapper)" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.9.5" - }, - "interpreter": { - "hash": "a0bdbbbd35b8737d6ed8fd4e3e20910c9a2e9c6b9d6f134defe083657da9d441" - } - }, - "nbformat": 4, - 
"nbformat_minor": 5 -} diff --git a/tests/tests_e2e/cypress/plugins/index.js b/tests/tests_e2e/cypress/plugins/index.js deleted file mode 100644 index 826dd50a48..0000000000 --- a/tests/tests_e2e/cypress/plugins/index.js +++ /dev/null @@ -1,36 +0,0 @@ -const path = require('path'); - -const yaml_fields = [ - 'security.authentication.type' -]; - -module.exports = (on, config) => { - - const fs = require('fs'); - const _ = require('lodash'); - const yaml = require('js-yaml'); - - let new_config = {}; - - try { - - let fileContents = fs.readFileSync(process.env.NEBARI_CONFIG_PATH, 'utf8'); - let data = yaml.load(fileContents); - - console.log(data); - - new_config['env'] = _.fromPairs( - _.map(yaml_fields, - field => ['nebari_'+field.replace(/\./g, '_') , _.get(data, field, '')] - ) - ); - - new_config['env']['full_path_of_cypress_folder'] = path.resolve(__dirname, ".."); - - } - catch (e) { - console.log(e); - } - - return new_config; - }; diff --git a/tests/tests_e2e/cypress/support/index.js b/tests/tests_e2e/cypress/support/index.js deleted file mode 100644 index b4ea442c1c..0000000000 --- a/tests/tests_e2e/cypress/support/index.js +++ /dev/null @@ -1,44 +0,0 @@ -// *********************************************************** -// This example support/index.js is processed and -// loaded automatically before your test files. -// -// This is a great place to put global configuration and -// behavior that modifies Cypress. -// -// You can change the location of this file or turn off -// automatically serving support files with the -// 'supportFile' configuration option. 
-// -// You can read more here: -// https://on.cypress.io/configuration -// *********************************************************** - -// Import commands.js using ES2015 syntax: -// import './commands' - -// Alternatively you can use CommonJS syntax: -// require('./commands') - -const path = require('path'); - -Cypress.on('uncaught:exception', (err, runnable) => { - // returning false here prevents Cypress from - // failing the test - return false; -}); - - -Cypress.Commands.add('loginWithPassword', (username, password) => { - cy.visit('/hub/home'); - - cy.get('#login-main > div.service-login > a') - .should('contain', 'Sign in with Keycloak').click(); - - cy.get('input#username') - .type(username); - - cy.get('input#password') - .type(password); - - cy.get('form').submit(); -}); diff --git a/tests/tests_e2e/package-lock.json b/tests/tests_e2e/package-lock.json deleted file mode 100644 index 0252ae09a5..0000000000 --- a/tests/tests_e2e/package-lock.json +++ /dev/null @@ -1,4128 +0,0 @@ -{ - "name": "e2etest", - "version": "1.0.0", - "lockfileVersion": 2, - "requires": true, - "packages": { - "": { - "name": "e2etest", - "version": "1.0.0", - "license": "ISC", - "devDependencies": { - "cypress": "^6.8.0", - "js-yaml": "^4.0.0", - "lodash": "^4.17.21" - } - }, - "node_modules/@cypress/listr-verbose-renderer": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@cypress/listr-verbose-renderer/-/listr-verbose-renderer-0.4.1.tgz", - "integrity": "sha1-p3SS9LEdzHxEajSz4ochr9M8ZCo=", - "dev": true, - "dependencies": { - "chalk": "^1.1.3", - "cli-cursor": "^1.0.2", - "date-fns": "^1.27.2", - "figures": "^1.7.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/@cypress/listr-verbose-renderer/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/@cypress/listr-verbose-renderer/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/@cypress/listr-verbose-renderer/node_modules/supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/@cypress/request": { - "version": "2.88.5", - "resolved": "https://registry.npmjs.org/@cypress/request/-/request-2.88.5.tgz", - "integrity": "sha512-TzEC1XMi1hJkywWpRfD2clreTa/Z+lOrXDCxxBTBPEcY5azdPi56A6Xw+O4tWJnaJH3iIE7G5aDXZC6JgRZLcA==", - "dev": true, - "dependencies": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": "~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - }, - "engines": { - "node": ">= 6" - } - }, - "node_modules/@cypress/xvfb": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@cypress/xvfb/-/xvfb-1.2.4.tgz", - "integrity": "sha512-skbBzPggOVYCbnGgV+0dmBdW/s77ZkAOXIC1knS8NagwDjBrNC1LuXtQJeiN6l+m7lzmHtaoUw/ctJKdqkG57Q==", - "dev": true, - "dependencies": { - "debug": "^3.1.0", - "lodash.once": "^4.1.1" - } - }, - "node_modules/@cypress/xvfb/node_modules/debug": { - 
"version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "dependencies": { - "ms": "^2.1.1" - } - }, - "node_modules/@samverschueren/stream-to-observable": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@samverschueren/stream-to-observable/-/stream-to-observable-0.3.1.tgz", - "integrity": "sha512-c/qwwcHyafOQuVQJj0IlBjf5yYgBI7YPJ77k4fOJYesb41jio65eaJODRUmfYKhTOFBrIZ66kgvGPlNbjuoRdQ==", - "dev": true, - "dependencies": { - "any-observable": "^0.3.0" - }, - "engines": { - "node": ">=6" - }, - "peerDependenciesMeta": { - "rxjs": { - "optional": true - }, - "zen-observable": { - "optional": true - } - } - }, - "node_modules/@types/node": { - "version": "12.12.50", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.12.50.tgz", - "integrity": "sha512-5ImO01Fb8YsEOYpV+aeyGYztcYcjGsBvN4D7G5r1ef2cuQOpymjWNQi5V0rKHE6PC2ru3HkoUr/Br2/8GUA84w==", - "dev": true - }, - "node_modules/@types/sinonjs__fake-timers": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.2.tgz", - "integrity": "sha512-dIPoZ3g5gcx9zZEszaxLSVTvMReD3xxyyDnQUjA6IYDG9Ba2AV0otMPs+77sG9ojB4Qr2N2Vk5RnKeuA0X/0bg==", - "dev": true - }, - "node_modules/@types/sizzle": { - "version": "2.3.2", - "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.2.tgz", - "integrity": "sha512-7EJYyKTL7tFR8+gDbB6Wwz/arpGa0Mywk1TJbNzKzHtzbwVmY4HR9WqS5VV7dsBUKQmPNr192jHr/VpBluj/hg==", - "dev": true - }, - "node_modules/ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "dependencies": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": 
"^0.4.1", - "uri-js": "^4.2.2" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/epoberezkin" - } - }, - "node_modules/ansi-escapes": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", - "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "dependencies": { - "color-convert": "^2.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/chalk/ansi-styles?sponsor=1" - } - }, - "node_modules/any-observable": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/any-observable/-/any-observable-0.3.0.tgz", - "integrity": "sha512-/FQM1EDkTsf63Ub2C6O7GuYFDsSXUwsaZDurV0np41ocwq0jthUAYCmhBX9f+KwlaCgIuWyr/4WlUQUBfKfZog==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/arch": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "node_modules/asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dev": true, - "dependencies": { - "safer-buffer": "~2.1.0" - } - }, - "node_modules/assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "dev": true, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/async": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.3.tgz", - "integrity": "sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g==", - "dev": true - }, - "node_modules/asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", - "dev": true - }, - "node_modules/at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true, - "engines": { - "node": ">= 4.0.0" - } - }, - "node_modules/aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==", - "dev": true - }, - 
"node_modules/balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", - "dev": true - }, - "node_modules/bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dev": true, - "dependencies": { - "tweetnacl": "^0.14.3" - } - }, - "node_modules/blob-util": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/blob-util/-/blob-util-2.0.2.tgz", - "integrity": "sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ==", - "dev": true - }, - "node_modules/bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true - }, - "node_modules/cachedir": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.3.0.tgz", - "integrity": 
"sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", - "dev": true - }, - "node_modules/chalk": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, - "node_modules/check-more-types": { - "version": "2.24.0", - "resolved": "https://registry.npmjs.org/check-more-types/-/check-more-types-2.24.0.tgz", - "integrity": "sha1-FCD/sQ/URNz8ebQ4kbv//TKoRgA=", - "dev": true, - "engines": { - "node": ">= 0.8.0" - } - }, - "node_modules/ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "dev": true - }, - "node_modules/cli-cursor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-1.0.2.tgz", - "integrity": "sha1-ZNo/fValRBLll5S9Ytw1KV6PKYc=", - "dev": true, - "dependencies": { - "restore-cursor": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cli-table3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.0.tgz", - "integrity": "sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ==", - "dev": true, - "dependencies": { - "object-assign": "^4.1.0", - "string-width": "^4.2.0" - }, - "engines": { - "node": "10.* || >= 12.*" - }, - 
"optionalDependencies": { - "colors": "^1.1.2" - } - }, - "node_modules/cli-truncate": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-0.2.1.tgz", - "integrity": "sha1-nxXPuwcFAFNpIWxiasfQWrkN1XQ=", - "dev": true, - "dependencies": { - "slice-ansi": "0.0.4", - "string-width": "^1.0.1" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cli-truncate/node_modules/is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "dependencies": { - "number-is-nan": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/cli-truncate/node_modules/string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - "integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "dependencies": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "dependencies": { - "color-name": "~1.1.4" - }, - "engines": { - "node": ">=7.0.0" - } - }, - "node_modules/color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - 
"dev": true - }, - "node_modules/colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", - "dev": true, - "optional": true, - "engines": { - "node": ">=0.1.90" - } - }, - "node_modules/combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, - "dependencies": { - "delayed-stream": "~1.0.0" - }, - "engines": { - "node": ">= 0.8" - } - }, - "node_modules/commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "dev": true, - "engines": { - "node": ">= 6" - } - }, - "node_modules/common-tags": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.0.tgz", - "integrity": "sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw==", - "dev": true, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true - }, - "node_modules/concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "dev": true, - "engines": [ - "node >= 0.8" - ], - "dependencies": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "node_modules/core-util-is": { - "version": 
"1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true - }, - "node_modules/cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/cypress": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/cypress/-/cypress-6.8.0.tgz", - "integrity": "sha512-W2e9Oqi7DmF48QtOD0LfsOLVq6ef2hcXZvJXI/E3PgFNmZXEVwBefhAxVCW9yTPortjYA2XkM20KyC4HRkOm9w==", - "dev": true, - "hasInstallScript": true, - "dependencies": { - "@cypress/listr-verbose-renderer": "^0.4.1", - "@cypress/request": "^2.88.5", - "@cypress/xvfb": "^1.2.4", - "@types/node": "12.12.50", - "@types/sinonjs__fake-timers": "^6.0.1", - "@types/sizzle": "^2.3.2", - "arch": "^2.1.2", - "blob-util": "2.0.2", - "bluebird": "^3.7.2", - "cachedir": "^2.3.0", - "chalk": "^4.1.0", - "check-more-types": "^2.24.0", - "cli-table3": "~0.6.0", - "commander": "^5.1.0", - "common-tags": "^1.8.0", - "dayjs": "^1.9.3", - "debug": "4.3.2", - "eventemitter2": "^6.4.2", - "execa": "^4.0.2", - "executable": "^4.1.1", - "extract-zip": "^1.7.0", - "fs-extra": "^9.0.1", - "getos": "^3.2.1", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.2", - "lazy-ass": "^1.6.0", - "listr": "^0.14.3", - "lodash": "^4.17.19", - "log-symbols": "^4.0.0", - "minimist": "^1.2.5", - "moment": "^2.29.1", - "ospath": "^1.2.2", - "pretty-bytes": "^5.4.1", - "ramda": "~0.27.1", - "request-progress": "^3.0.0", - "supports-color": "^7.2.0", - "tmp": "~0.2.1", - "untildify": "^4.0.0", - "url": "^0.11.0", - "yauzl": "^2.10.0" - }, - "bin": { - "cypress": "bin/cypress" - }, - "engines": { - "node": ">=10.0.0" - } - 
}, - "node_modules/dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - }, - "engines": { - "node": ">=0.10" - } - }, - "node_modules/date-fns": { - "version": "1.30.1", - "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-1.30.1.tgz", - "integrity": "sha512-hBSVCvSmWC+QypYObzwGOd9wqdDpOt+0wl0KbU+R+uuZBS1jN8VsD1ss3irQDknRj5NvxiTF6oj/nDRnN/UQNw==", - "dev": true - }, - "node_modules/dayjs": { - "version": "1.10.4", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.10.4.tgz", - "integrity": "sha512-RI/Hh4kqRc1UKLOAf/T5zdMMX5DQIlDxwUe3wSyMMnEbGunnpENCdbUgM+dW7kXidZqCttBrmw7BhN4TMddkCw==", - "dev": true - }, - "node_modules/debug": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", - "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", - "dev": true, - "dependencies": { - "ms": "2.1.2" - }, - "engines": { - "node": ">=6.0" - }, - "peerDependenciesMeta": { - "supports-color": { - "optional": true - } - } - }, - "node_modules/delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "dev": true, - "engines": { - "node": ">=0.4.0" - } - }, - "node_modules/ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dev": true, - "dependencies": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - "node_modules/elegant-spinner": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/elegant-spinner/-/elegant-spinner-1.0.1.tgz", - "integrity": "sha1-2wQ1IcldfjA/2PNFvtwzSc+wcp4=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - 
"node_modules/emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/eventemitter2": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.4.tgz", - "integrity": "sha512-HLU3NDY6wARrLCEwyGKRBvuWYyvW6mHYv72SJJAH3iJN3a6eVUvkjFkcxah1bcTgGVBBrFdIopBJPhCQFMLyXw==", - "dev": true - }, - "node_modules/execa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", - "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", - "dev": true, - "dependencies": { - "cross-spawn": "^7.0.0", - "get-stream": "^5.0.0", - "human-signals": "^1.1.1", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.0", - "onetime": "^5.1.0", - "signal-exit": "^3.0.2", - "strip-final-newline": "^2.0.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sindresorhus/execa?sponsor=1" - } - }, - "node_modules/executable": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", - "integrity": 
"sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", - "dev": true, - "dependencies": { - "pify": "^2.2.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/exit-hook": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/exit-hook/-/exit-hook-1.1.1.tgz", - "integrity": "sha1-8FyiM7SMBdVP/wd2XfhQfpXAL/g=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, - "node_modules/extract-zip": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", - "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", - "dev": true, - "dependencies": { - "concat-stream": "^1.6.2", - "debug": "^2.6.9", - "mkdirp": "^0.5.4", - "yauzl": "^2.10.0" - }, - "bin": { - "extract-zip": "cli.js" - } - }, - "node_modules/extract-zip/node_modules/debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "dependencies": { - "ms": "2.0.0" - } - }, - "node_modules/extract-zip/node_modules/ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - }, - "node_modules/extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "dev": true, - "engines": [ - "node >=0.6.0" - ] - }, - "node_modules/fast-deep-equal": { - "version": "3.1.3", - "resolved": 
"https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "node_modules/fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", - "dev": true, - "dependencies": { - "pend": "~1.2.0" - } - }, - "node_modules/figures": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", - "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.5", - "object-assign": "^4.1.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dev": true, - "dependencies": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - }, - "engines": { - "node": ">= 0.12" - } - }, - "node_modules/fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - 
"dependencies": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "node_modules/get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "dependencies": { - "pump": "^3.0.0" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/getos": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/getos/-/getos-3.2.1.tgz", - "integrity": "sha512-U56CfOK17OKgTVqozZjUKNdkfEv6jk5WISBJ8SHoagjE6L69zOwl3Z+O8myjY9MEW3i2HPWQBt/LTbCgcC973Q==", - "dev": true, - "dependencies": { - "async": "^3.2.0" - } - }, - "node_modules/getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dev": true, - "dependencies": { - "assert-plus": "^1.0.0" - } - }, - "node_modules/glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/global-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz", - "integrity": 
"sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==", - "dev": true, - "dependencies": { - "ini": "1.3.7" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==", - "dev": true - }, - "node_modules/har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "deprecated": "this library is no longer supported", - "dev": true, - "dependencies": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/has-ansi": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", - "dev": true, - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dev": true, - 
"dependencies": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - }, - "engines": { - "node": ">=0.8", - "npm": ">=1.3.7" - } - }, - "node_modules/human-signals": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", - "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", - "dev": true, - "engines": { - "node": ">=8.12.0" - } - }, - "node_modules/indent-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", - "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "node_modules/inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true - }, - "node_modules/ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==", - "dev": true - }, - "node_modules/is-ci": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dev": true, - "dependencies": { - "ci-info": "^2.0.0" - }, - "bin": { - "is-ci": "bin.js" - } - }, - "node_modules/is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": 
"sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dev": true, - "dependencies": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/is-observable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-observable/-/is-observable-1.1.0.tgz", - "integrity": "sha512-NqCa4Sa2d+u7BWc6CukaObG3Fh+CU9bvixbpcXYhy2VvYS7vVGIdAgnIS5Ks3A/cqk4rebLJ9s8zBstT2aKnIA==", - "dev": true, - "dependencies": { - "symbol-observable": "^1.1.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==", - "dev": true - }, - "node_modules/is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/is-typedarray": { - "version": "1.0.0", - "resolved": 
"https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", - "dev": true - }, - "node_modules/is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true - }, - "node_modules/isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", - "dev": true - }, - "node_modules/js-yaml": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", - "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", - "dev": true, - "dependencies": { - "argparse": "^2.0.1" - }, - "bin": { - "js-yaml": "bin/js-yaml.js" - } - }, - "node_modules/jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", - "dev": true - }, - "node_modules/json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", - "dev": true - }, - "node_modules/json-schema-traverse": { - "version": "0.4.1", - "resolved": 
"https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "node_modules/json-stringify-safe": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", - "dev": true - }, - "node_modules/jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "node_modules/lazy-ass": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/lazy-ass/-/lazy-ass-1.6.0.tgz", - "integrity": "sha1-eZllXoZGwX8In90YfRUNMyTVRRM=", - "dev": true, - "engines": { - "node": "> 0.8" - } - }, - "node_modules/listr": { - "version": "0.14.3", - "resolved": "https://registry.npmjs.org/listr/-/listr-0.14.3.tgz", - "integrity": "sha512-RmAl7su35BFd/xoMamRjpIE4j3v+L28o8CT5YhAXQJm1fD+1l9ngXY8JAQRJ+tFK2i5njvi0iRUKV09vPwA0iA==", - "dev": true, - "dependencies": { - "@samverschueren/stream-to-observable": "^0.3.0", - "is-observable": "^1.1.0", - "is-promise": "^2.1.0", - "is-stream": "^1.1.0", - "listr-silent-renderer": "^1.1.1", - "listr-update-renderer": "^0.5.0", - "listr-verbose-renderer": "^0.5.0", - "p-map": "^2.0.0", - "rxjs": "^6.3.3" - }, - "engines": { - "node": ">=6" - } - }, - 
"node_modules/listr-silent-renderer": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/listr-silent-renderer/-/listr-silent-renderer-1.1.1.tgz", - "integrity": "sha1-kktaN1cVN3C/Go4/v3S4u/P5JC4=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-update-renderer": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/listr-update-renderer/-/listr-update-renderer-0.5.0.tgz", - "integrity": "sha512-tKRsZpKz8GSGqoI/+caPmfrypiaq+OQCbd+CovEC24uk1h952lVj5sC7SqyFUm+OaJ5HN/a1YLt5cit2FMNsFA==", - "dev": true, - "dependencies": { - "chalk": "^1.1.3", - "cli-truncate": "^0.2.1", - "elegant-spinner": "^1.0.1", - "figures": "^1.7.0", - "indent-string": "^3.0.0", - "log-symbols": "^1.0.2", - "log-update": "^2.3.0", - "strip-ansi": "^3.0.1" - }, - "engines": { - "node": ">=6" - }, - "peerDependencies": { - "listr": "^0.14.2" - } - }, - "node_modules/listr-update-renderer/node_modules/ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/listr-update-renderer/node_modules/chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "dependencies": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/listr-update-renderer/node_modules/log-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-1.0.2.tgz", - "integrity": "sha1-N2/3tY6jCGoPCfrMdGF+ylAeGhg=", - "dev": true, - "dependencies": { - "chalk": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/listr-update-renderer/node_modules/supports-color": { - 
"version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true, - "engines": { - "node": ">=0.8.0" - } - }, - "node_modules/listr-verbose-renderer": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/listr-verbose-renderer/-/listr-verbose-renderer-0.5.0.tgz", - "integrity": "sha512-04PDPqSlsqIOaaaGZ+41vq5FejI9auqTInicFRndCBgE3bXG8D6W1I+mWhk+1nqbHmyhla/6BUrd5OSiHwKRXw==", - "dev": true, - "dependencies": { - "chalk": "^2.4.1", - "cli-cursor": "^2.1.0", - "date-fns": "^1.27.2", - "figures": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/cli-cursor": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", - "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", - "dev": true, - "dependencies": { - "restore-cursor": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/color-convert": { - "version": "1.9.3", - "resolved": 
"https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/listr-verbose-renderer/node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "node_modules/listr-verbose-renderer/node_modules/figures": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", - "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", - "dev": true, - "dependencies": { - "escape-string-regexp": "^1.0.5" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/onetime": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", - "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, - "dependencies": { - "mimic-fn": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/restore-cursor": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", - "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, - "dependencies": { - 
"onetime": "^2.0.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr-verbose-renderer/node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/listr/node_modules/is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "node_modules/lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=", - "dev": true - }, - "node_modules/log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "dependencies": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/log-update": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-2.3.0.tgz", - "integrity": "sha1-iDKP19HOeTiykoN0bwsbwSayRwg=", - "dev": true, - "dependencies": { - "ansi-escapes": "^3.0.0", - "cli-cursor": "^2.0.0", - "wrap-ansi": "^3.0.1" - }, - "engines": { - "node": ">=4" - 
} - }, - "node_modules/log-update/node_modules/cli-cursor": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", - "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", - "dev": true, - "dependencies": { - "restore-cursor": "^2.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/log-update/node_modules/mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/log-update/node_modules/onetime": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", - "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, - "dependencies": { - "mimic-fn": "^1.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/log-update/node_modules/restore-cursor": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", - "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, - "dependencies": { - "onetime": "^2.0.0", - "signal-exit": "^3.0.2" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true - }, - "node_modules/mime-db": { - "version": "1.46.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz", - "integrity": "sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ==", - "dev": true, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mime-types": { - "version": "2.1.29", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.29.tgz", - 
"integrity": "sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ==", - "dev": true, - "dependencies": { - "mime-db": "1.46.0" - }, - "engines": { - "node": ">= 0.6" - } - }, - "node_modules/mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": "sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "dev": true - }, - "node_modules/mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "dependencies": { - "minimist": "^1.2.5" - }, - "bin": { - "mkdirp": "bin/cmd.js" - } - }, - "node_modules/moment": { - "version": "2.29.2", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.2.tgz", - "integrity": "sha512-UgzG4rvxYpN15jgCmVJwac49h9ly9NurikMWGPdVxm8GZD6XjkKPxDTjQQ43gtGgnV3X0cAyWDdP2Wexoquifg==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - 
"node_modules/npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dev": true, - "dependencies": { - "path-key": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "dev": true, - "engines": { - "node": "*" - } - }, - "node_modules/object-assign": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "dependencies": { - "mimic-fn": "^2.1.0" - }, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/ospath": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/ospath/-/ospath-1.2.2.tgz", - "integrity": "sha1-EnZjl3Sj+O8lcvf+QoDg6kVQwHs=", - "dev": true - }, - 
"node_modules/p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", - "dev": true - }, - "node_modules/performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", - "dev": true - }, - "node_modules/pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": "sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/pretty-bytes": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", - "dev": true, - "engines": { - "node": ">=6" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/process-nextick-args": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true - }, - "node_modules/psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==", - "dev": true - }, - "node_modules/pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "node_modules/punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "dev": true, - "engines": { - "node": ">=0.6" - } - }, - "node_modules/querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "dev": true, - "engines": { - "node": ">=0.4.x" - } - }, - "node_modules/ramda": { - "version": "0.27.1", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.27.1.tgz", - "integrity": "sha512-PgIdVpn5y5Yns8vqb8FzBUEYn98V3xcPgawAkkgj0YJ0qDsnHCiNmZYfOGMgOvoB0eWFLpYbhxUR3mxfDIMvpw==", - "dev": true - }, - "node_modules/readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - 
"integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "dependencies": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - } - }, - "node_modules/readable-stream/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/request-progress": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-3.0.0.tgz", - "integrity": "sha1-TKdUCBx/7GP1BeT6qCWqBs1mnb4=", - "dev": true, - "dependencies": { - "throttleit": "^1.0.0" - } - }, - "node_modules/restore-cursor": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-1.0.1.tgz", - "integrity": "sha1-NGYfRohjJ/7SmRR5FSJS35LapUE=", - "dev": true, - "dependencies": { - "exit-hook": "^1.0.0", - "onetime": "^1.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/restore-cursor/node_modules/onetime": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-1.1.0.tgz", - "integrity": "sha1-ofeDj4MUxRbwXs78vEzP4EtO14k=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "dependencies": { - "glob": "^7.1.3" - }, - "bin": { - "rimraf": "bin.js" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, - "node_modules/rxjs": { - "version": "6.6.6", - "resolved": 
"https://registry.npmjs.org/rxjs/-/rxjs-6.6.6.tgz", - "integrity": "sha512-/oTwee4N4iWzAMAL9xdGKjkEHmIwupR3oXbQjCKywF1BeFohswF3vZdogbmEF6pZkOsXTzWkrZszrWpQTByYVg==", - "dev": true, - "dependencies": { - "tslib": "^1.9.0" - }, - "engines": { - "npm": ">=2.0.0" - } - }, - "node_modules/safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ] - }, - "node_modules/safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true - }, - 
"node_modules/slice-ansi": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-0.0.4.tgz", - "integrity": "sha1-7b+JA/ZvfOL46v1s7tZeJkyDGzU=", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dev": true, - "dependencies": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - "bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - }, - "bin": { - "sshpk-conv": "bin/sshpk-conv", - "sshpk-sign": "bin/sshpk-sign", - "sshpk-verify": "bin/sshpk-verify" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "dependencies": { - "safe-buffer": "~5.1.0" - } - }, - "node_modules/string_decoder/node_modules/safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - }, - "node_modules/string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dev": true, - "dependencies": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width/node_modules/ansi-regex": { 
- "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/string-width/node_modules/strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "dependencies": { - "ansi-regex": "^5.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "dependencies": { - "ansi-regex": "^2.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/strip-final-newline": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "dev": true, - "engines": { - "node": ">=6" - } - }, - "node_modules/supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "dependencies": { - "has-flag": "^4.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/symbol-observable": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-1.2.0.tgz", - "integrity": "sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ==", - "dev": true, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/throttleit": { - "version": 
"1.0.0", - "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", - "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", - "dev": true - }, - "node_modules/tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "dependencies": { - "rimraf": "^3.0.0" - }, - "engines": { - "node": ">=8.17.0" - } - }, - "node_modules/tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dev": true, - "dependencies": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dev": true, - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, - "node_modules/tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", - "dev": true - }, - "node_modules/typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", - "dev": true - }, - "node_modules/universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": 
"sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true, - "engines": { - "node": ">= 10.0.0" - } - }, - "node_modules/untildify": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", - "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", - "dev": true, - "engines": { - "node": ">=8" - } - }, - "node_modules/uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "dependencies": { - "punycode": "^2.1.0" - } - }, - "node_modules/url": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", - "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dev": true, - "dependencies": { - "punycode": "1.3.2", - "querystring": "0.2.0" - } - }, - "node_modules/url/node_modules/punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", - "dev": true - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true - }, - "node_modules/uuid": { - "version": "3.4.0", - "resolved": "https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "dev": true, - "bin": { - "uuid": "bin/uuid" - } - }, - "node_modules/verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "dev": true, - "engines": [ - "node >=0.6.0" - ], - "dependencies": { - "assert-plus": "^1.0.0", - 
"core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, - "node_modules/wrap-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-3.0.1.tgz", - "integrity": "sha1-KIoE2H7aXChuBg3+jxNc6NAH+Lo=", - "dev": true, - "dependencies": { - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrap-ansi/node_modules/ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrap-ansi/node_modules/is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrap-ansi/node_modules/string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "dependencies": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrap-ansi/node_modules/strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "dependencies": { - "ansi-regex": "^3.0.0" - 
}, - "engines": { - "node": ">=4" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", - "dev": true, - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } - }, - "dependencies": { - "@cypress/listr-verbose-renderer": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/@cypress/listr-verbose-renderer/-/listr-verbose-renderer-0.4.1.tgz", - "integrity": "sha1-p3SS9LEdzHxEajSz4ochr9M8ZCo=", - "dev": true, - "requires": { - "chalk": "^1.1.3", - "cli-cursor": "^1.0.2", - "date-fns": "^1.27.2", - "figures": "^1.7.0" - }, - "dependencies": { - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - } - } - }, - "@cypress/request": { - "version": "2.88.5", - "resolved": "https://registry.npmjs.org/@cypress/request/-/request-2.88.5.tgz", - "integrity": "sha512-TzEC1XMi1hJkywWpRfD2clreTa/Z+lOrXDCxxBTBPEcY5azdPi56A6Xw+O4tWJnaJH3iIE7G5aDXZC6JgRZLcA==", - "dev": true, - "requires": { - "aws-sign2": "~0.7.0", - "aws4": "^1.8.0", - "caseless": "~0.12.0", - "combined-stream": 
"~1.0.6", - "extend": "~3.0.2", - "forever-agent": "~0.6.1", - "form-data": "~2.3.2", - "har-validator": "~5.1.3", - "http-signature": "~1.2.0", - "is-typedarray": "~1.0.0", - "isstream": "~0.1.2", - "json-stringify-safe": "~5.0.1", - "mime-types": "~2.1.19", - "oauth-sign": "~0.9.0", - "performance-now": "^2.1.0", - "qs": "~6.5.2", - "safe-buffer": "^5.1.2", - "tough-cookie": "~2.5.0", - "tunnel-agent": "^0.6.0", - "uuid": "^3.3.2" - } - }, - "@cypress/xvfb": { - "version": "1.2.4", - "resolved": "https://registry.npmjs.org/@cypress/xvfb/-/xvfb-1.2.4.tgz", - "integrity": "sha512-skbBzPggOVYCbnGgV+0dmBdW/s77ZkAOXIC1knS8NagwDjBrNC1LuXtQJeiN6l+m7lzmHtaoUw/ctJKdqkG57Q==", - "dev": true, - "requires": { - "debug": "^3.1.0", - "lodash.once": "^4.1.1" - }, - "dependencies": { - "debug": { - "version": "3.2.7", - "resolved": "https://registry.npmjs.org/debug/-/debug-3.2.7.tgz", - "integrity": "sha512-CFjzYYAi4ThfiQvizrFQevTTXHtnCqWfe7x1AhgEscTz6ZbLbfoLRLPugTQyBth6f8ZERVUSyWHFD/7Wu4t1XQ==", - "dev": true, - "requires": { - "ms": "^2.1.1" - } - } - } - }, - "@samverschueren/stream-to-observable": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/@samverschueren/stream-to-observable/-/stream-to-observable-0.3.1.tgz", - "integrity": "sha512-c/qwwcHyafOQuVQJj0IlBjf5yYgBI7YPJ77k4fOJYesb41jio65eaJODRUmfYKhTOFBrIZ66kgvGPlNbjuoRdQ==", - "dev": true, - "requires": { - "any-observable": "^0.3.0" - } - }, - "@types/node": { - "version": "12.12.50", - "resolved": "https://registry.npmjs.org/@types/node/-/node-12.12.50.tgz", - "integrity": "sha512-5ImO01Fb8YsEOYpV+aeyGYztcYcjGsBvN4D7G5r1ef2cuQOpymjWNQi5V0rKHE6PC2ru3HkoUr/Br2/8GUA84w==", - "dev": true - }, - "@types/sinonjs__fake-timers": { - "version": "6.0.2", - "resolved": "https://registry.npmjs.org/@types/sinonjs__fake-timers/-/sinonjs__fake-timers-6.0.2.tgz", - "integrity": "sha512-dIPoZ3g5gcx9zZEszaxLSVTvMReD3xxyyDnQUjA6IYDG9Ba2AV0otMPs+77sG9ojB4Qr2N2Vk5RnKeuA0X/0bg==", - "dev": true - }, - "@types/sizzle": { - 
"version": "2.3.2", - "resolved": "https://registry.npmjs.org/@types/sizzle/-/sizzle-2.3.2.tgz", - "integrity": "sha512-7EJYyKTL7tFR8+gDbB6Wwz/arpGa0Mywk1TJbNzKzHtzbwVmY4HR9WqS5VV7dsBUKQmPNr192jHr/VpBluj/hg==", - "dev": true - }, - "ajv": { - "version": "6.12.6", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", - "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", - "dev": true, - "requires": { - "fast-deep-equal": "^3.1.1", - "fast-json-stable-stringify": "^2.0.0", - "json-schema-traverse": "^0.4.1", - "uri-js": "^4.2.2" - } - }, - "ansi-escapes": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-3.2.0.tgz", - "integrity": "sha512-cBhpre4ma+U0T1oM5fXg7Dy1Jw7zzwv7lt/GoCpr+hDQJoYnKVPLL4dCvSEFMmQurOQvSrwT7SL/DAlhBI97RQ==", - "dev": true - }, - "ansi-regex": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-2.1.1.tgz", - "integrity": "sha1-w7M6te42DYbg5ijwRorn7yfWVN8=", - "dev": true - }, - "ansi-styles": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", - "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, - "requires": { - "color-convert": "^2.0.1" - } - }, - "any-observable": { - "version": "0.3.0", - "resolved": "https://registry.npmjs.org/any-observable/-/any-observable-0.3.0.tgz", - "integrity": "sha512-/FQM1EDkTsf63Ub2C6O7GuYFDsSXUwsaZDurV0np41ocwq0jthUAYCmhBX9f+KwlaCgIuWyr/4WlUQUBfKfZog==", - "dev": true - }, - "arch": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/arch/-/arch-2.2.0.tgz", - "integrity": "sha512-Of/R0wqp83cgHozfIYLbBMnej79U/SVGOOyuB3VVFv1NRM/PSFMK12x9KVtiYzJqmnU5WR2qp0Z5rHb7sWGnFQ==", - "dev": true - }, - "argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": 
"sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true - }, - "asn1": { - "version": "0.2.4", - "resolved": "https://registry.npmjs.org/asn1/-/asn1-0.2.4.tgz", - "integrity": "sha512-jxwzQpLQjSmWXgwaCZE9Nz+glAG01yF1QnWgbhGwHI5A6FRIEY6IVqtHhIepHqI7/kyEyQEagBC5mBEFlIYvdg==", - "dev": true, - "requires": { - "safer-buffer": "~2.1.0" - } - }, - "assert-plus": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/assert-plus/-/assert-plus-1.0.0.tgz", - "integrity": "sha1-8S4PPF13sLHN2RRpQuTpbB5N1SU=", - "dev": true - }, - "async": { - "version": "3.2.3", - "resolved": "https://registry.npmjs.org/async/-/async-3.2.3.tgz", - "integrity": "sha512-spZRyzKL5l5BZQrr/6m/SqFdBN0q3OCI0f9rjfBzCMBIP4p75P620rR3gTmaksNOhmzgdxcaxdNfMy6anrbM0g==", - "dev": true - }, - "asynckit": { - "version": "0.4.0", - "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", - "integrity": "sha1-x57Zf380y48robyXkLzDZkdLS3k=", - "dev": true - }, - "at-least-node": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", - "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", - "dev": true - }, - "aws-sign2": { - "version": "0.7.0", - "resolved": "https://registry.npmjs.org/aws-sign2/-/aws-sign2-0.7.0.tgz", - "integrity": "sha1-tG6JCTSpWR8tL2+G1+ap8bP+dqg=", - "dev": true - }, - "aws4": { - "version": "1.11.0", - "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.11.0.tgz", - "integrity": "sha512-xh1Rl34h6Fi1DC2WWKfxUTVqRsNnr6LsKz2+hfwDxQJWmrx8+c7ylaqBMcHfl1U1r2dsifOvKX3LQuLNZ+XSvA==", - "dev": true - }, - "balanced-match": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.0.tgz", - "integrity": "sha1-ibTRmasr7kneFk6gK4nORi1xt2c=", - "dev": true - }, - "bcrypt-pbkdf": { - "version": "1.0.2", - "resolved": 
"https://registry.npmjs.org/bcrypt-pbkdf/-/bcrypt-pbkdf-1.0.2.tgz", - "integrity": "sha1-pDAdOJtqQ/m2f/PKEaP2Y342Dp4=", - "dev": true, - "requires": { - "tweetnacl": "^0.14.3" - } - }, - "blob-util": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/blob-util/-/blob-util-2.0.2.tgz", - "integrity": "sha512-T7JQa+zsXXEa6/8ZhHcQEW1UFfVM49Ts65uBkFL6fz2QmrElqmbajIDJvuA0tEhRe5eIjpV9ZF+0RfZR9voJFQ==", - "dev": true - }, - "bluebird": { - "version": "3.7.2", - "resolved": "https://registry.npmjs.org/bluebird/-/bluebird-3.7.2.tgz", - "integrity": "sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg==", - "dev": true - }, - "brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "requires": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha1-DTM+PwDqxQqhRUq9MO+MKl2ackI=", - "dev": true - }, - "buffer-from": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/buffer-from/-/buffer-from-1.1.1.tgz", - "integrity": "sha512-MQcXEUbCKtEo7bhqEs6560Hyd4XaovZlO/k9V3hjVUF/zwW7KBVdSK4gIt/bzwS9MbR5qob+F5jusZsb0YQK2A==", - "dev": true - }, - "cachedir": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/cachedir/-/cachedir-2.3.0.tgz", - "integrity": "sha512-A+Fezp4zxnit6FanDmv9EqXNAi3vt9DWp51/71UEhXukb7QUuvtv9344h91dyAxuTLoSYJFU299qzR3tzwPAhw==", - "dev": true - }, - "caseless": { - "version": "0.12.0", - "resolved": "https://registry.npmjs.org/caseless/-/caseless-0.12.0.tgz", - "integrity": "sha1-G2gcIf+EAzyCZUMJBolCDRhxUdw=", - "dev": true - }, - "chalk": { - "version": "4.1.0", - "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.0.tgz", - "integrity": "sha512-qwx12AxXe2Q5xQ43Ac//I6v5aXTipYrSESdOgzrN+9XjgEpyjpKuvSGaN4qE93f7TQTlerQQ8S+EQ0EyDoVL1A==", - "dev": true, - "requires": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - } - }, - "check-more-types": { - "version": "2.24.0", - "resolved": "https://registry.npmjs.org/check-more-types/-/check-more-types-2.24.0.tgz", - "integrity": "sha1-FCD/sQ/URNz8ebQ4kbv//TKoRgA=", - "dev": true - }, - "ci-info": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-2.0.0.tgz", - "integrity": "sha512-5tK7EtrZ0N+OLFMthtqOj4fI2Jeb88C4CAZPu25LDVUgXJ0A3Js4PMGqrn0JU1W0Mh1/Z8wZzYPxqUrXeBboCQ==", - "dev": true - }, - "cli-cursor": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-1.0.2.tgz", - "integrity": "sha1-ZNo/fValRBLll5S9Ytw1KV6PKYc=", - "dev": true, - "requires": { - "restore-cursor": "^1.0.1" - } - }, - "cli-table3": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/cli-table3/-/cli-table3-0.6.0.tgz", - "integrity": "sha512-gnB85c3MGC7Nm9I/FkiasNBOKjOiO1RNuXXarQms37q4QMpWdlbBgD/VnOStA2faG1dpXMv31RFApjX1/QdgWQ==", - "dev": true, - "requires": { - "colors": "^1.1.2", - "object-assign": "^4.1.0", - "string-width": "^4.2.0" - } - }, - "cli-truncate": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/cli-truncate/-/cli-truncate-0.2.1.tgz", - "integrity": "sha1-nxXPuwcFAFNpIWxiasfQWrkN1XQ=", - "dev": true, - "requires": { - "slice-ansi": "0.0.4", - "string-width": "^1.0.1" - }, - "dependencies": { - "is-fullwidth-code-point": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-1.0.0.tgz", - "integrity": "sha1-754xOG8DGn8NZDr4L95QxFfvAMs=", - "dev": true, - "requires": { - "number-is-nan": "^1.0.0" - } - }, - "string-width": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-1.0.2.tgz", - 
"integrity": "sha1-EYvfW4zcUaKn5w0hHgfisLmxB9M=", - "dev": true, - "requires": { - "code-point-at": "^1.0.0", - "is-fullwidth-code-point": "^1.0.0", - "strip-ansi": "^3.0.0" - } - } - } - }, - "code-point-at": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/code-point-at/-/code-point-at-1.1.0.tgz", - "integrity": "sha1-DQcLTQQ6W+ozovGkDi7bPZpMz3c=", - "dev": true - }, - "color-convert": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", - "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, - "requires": { - "color-name": "~1.1.4" - } - }, - "color-name": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true - }, - "colors": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/colors/-/colors-1.4.0.tgz", - "integrity": "sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==", - "dev": true, - "optional": true - }, - "combined-stream": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", - "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", - "dev": true, - "requires": { - "delayed-stream": "~1.0.0" - } - }, - "commander": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/commander/-/commander-5.1.0.tgz", - "integrity": "sha512-P0CysNDQ7rtVw4QIQtm+MRxV66vKFSvlsQvGYXZWR3qFU0jlMKHZZZgw8e+8DSah4UDKMqnknRDQz+xuQXQ/Zg==", - "dev": true - }, - "common-tags": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/common-tags/-/common-tags-1.8.0.tgz", - "integrity": 
"sha512-6P6g0uetGpW/sdyUy/iQQCbFF0kWVMSIVSyYz7Zgjcgh8mgw8PQzDNZeyZ5DQ2gM7LBoZPHmnjz8rUthkBG5tw==", - "dev": true - }, - "concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha1-2Klr13/Wjfd5OnMDajug1UBdR3s=", - "dev": true - }, - "concat-stream": { - "version": "1.6.2", - "resolved": "https://registry.npmjs.org/concat-stream/-/concat-stream-1.6.2.tgz", - "integrity": "sha512-27HBghJxjiZtIk3Ycvn/4kbJk/1uZuJFfuPEns6LaEvpvG1f0hTea8lilrouyo9mVc2GWdcEZ8OLoGmSADlrCw==", - "dev": true, - "requires": { - "buffer-from": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^2.2.2", - "typedarray": "^0.0.6" - } - }, - "core-util-is": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", - "integrity": "sha1-tf1UIgqivFq1eqtxQMlAdUUDwac=", - "dev": true - }, - "cross-spawn": { - "version": "7.0.3", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", - "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, - "requires": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - } - }, - "cypress": { - "version": "6.8.0", - "resolved": "https://registry.npmjs.org/cypress/-/cypress-6.8.0.tgz", - "integrity": "sha512-W2e9Oqi7DmF48QtOD0LfsOLVq6ef2hcXZvJXI/E3PgFNmZXEVwBefhAxVCW9yTPortjYA2XkM20KyC4HRkOm9w==", - "dev": true, - "requires": { - "@cypress/listr-verbose-renderer": "^0.4.1", - "@cypress/request": "^2.88.5", - "@cypress/xvfb": "^1.2.4", - "@types/node": "12.12.50", - "@types/sinonjs__fake-timers": "^6.0.1", - "@types/sizzle": "^2.3.2", - "arch": "^2.1.2", - "blob-util": "2.0.2", - "bluebird": "^3.7.2", - "cachedir": "^2.3.0", - "chalk": "^4.1.0", - "check-more-types": "^2.24.0", - "cli-table3": "~0.6.0", - "commander": "^5.1.0", - "common-tags": "^1.8.0", - "dayjs": "^1.9.3", - "debug": "4.3.2", - "eventemitter2": "^6.4.2", - 
"execa": "^4.0.2", - "executable": "^4.1.1", - "extract-zip": "^1.7.0", - "fs-extra": "^9.0.1", - "getos": "^3.2.1", - "is-ci": "^2.0.0", - "is-installed-globally": "^0.3.2", - "lazy-ass": "^1.6.0", - "listr": "^0.14.3", - "lodash": "^4.17.19", - "log-symbols": "^4.0.0", - "minimist": "^1.2.5", - "moment": "^2.29.1", - "ospath": "^1.2.2", - "pretty-bytes": "^5.4.1", - "ramda": "~0.27.1", - "request-progress": "^3.0.0", - "supports-color": "^7.2.0", - "tmp": "~0.2.1", - "untildify": "^4.0.0", - "url": "^0.11.0", - "yauzl": "^2.10.0" - } - }, - "dashdash": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/dashdash/-/dashdash-1.14.1.tgz", - "integrity": "sha1-hTz6D3y+L+1d4gMmuN1YEDX24vA=", - "dev": true, - "requires": { - "assert-plus": "^1.0.0" - } - }, - "date-fns": { - "version": "1.30.1", - "resolved": "https://registry.npmjs.org/date-fns/-/date-fns-1.30.1.tgz", - "integrity": "sha512-hBSVCvSmWC+QypYObzwGOd9wqdDpOt+0wl0KbU+R+uuZBS1jN8VsD1ss3irQDknRj5NvxiTF6oj/nDRnN/UQNw==", - "dev": true - }, - "dayjs": { - "version": "1.10.4", - "resolved": "https://registry.npmjs.org/dayjs/-/dayjs-1.10.4.tgz", - "integrity": "sha512-RI/Hh4kqRc1UKLOAf/T5zdMMX5DQIlDxwUe3wSyMMnEbGunnpENCdbUgM+dW7kXidZqCttBrmw7BhN4TMddkCw==", - "dev": true - }, - "debug": { - "version": "4.3.2", - "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.2.tgz", - "integrity": "sha512-mOp8wKcvj7XxC78zLgw/ZA+6TSgkoE2C/ienthhRD298T7UNwAg9diBpLRxC0mOezLl4B0xV7M0cCO6P/O0Xhw==", - "dev": true, - "requires": { - "ms": "2.1.2" - } - }, - "delayed-stream": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", - "integrity": "sha1-3zrhmayt+31ECqrgsp4icrJOxhk=", - "dev": true - }, - "ecc-jsbn": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/ecc-jsbn/-/ecc-jsbn-0.1.2.tgz", - "integrity": "sha1-OoOpBOVDUyh4dMVkt1SThoSamMk=", - "dev": true, - "requires": { - "jsbn": "~0.1.0", - "safer-buffer": "^2.1.0" - } - }, - 
"elegant-spinner": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/elegant-spinner/-/elegant-spinner-1.0.1.tgz", - "integrity": "sha1-2wQ1IcldfjA/2PNFvtwzSc+wcp4=", - "dev": true - }, - "emoji-regex": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", - "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==", - "dev": true - }, - "end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "requires": { - "once": "^1.4.0" - } - }, - "escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": "sha1-G2HAViGQqN/2rjuyzwIAyhMLhtQ=", - "dev": true - }, - "eventemitter2": { - "version": "6.4.4", - "resolved": "https://registry.npmjs.org/eventemitter2/-/eventemitter2-6.4.4.tgz", - "integrity": "sha512-HLU3NDY6wARrLCEwyGKRBvuWYyvW6mHYv72SJJAH3iJN3a6eVUvkjFkcxah1bcTgGVBBrFdIopBJPhCQFMLyXw==", - "dev": true - }, - "execa": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/execa/-/execa-4.1.0.tgz", - "integrity": "sha512-j5W0//W7f8UxAn8hXVnwG8tLwdiUy4FJLcSupCg6maBYZDpyBvTApK7KyuI4bKj8KOh1r2YH+6ucuYtJv1bTZA==", - "dev": true, - "requires": { - "cross-spawn": "^7.0.0", - "get-stream": "^5.0.0", - "human-signals": "^1.1.1", - "is-stream": "^2.0.0", - "merge-stream": "^2.0.0", - "npm-run-path": "^4.0.0", - "onetime": "^5.1.0", - "signal-exit": "^3.0.2", - "strip-final-newline": "^2.0.0" - } - }, - "executable": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/executable/-/executable-4.1.1.tgz", - "integrity": "sha512-8iA79xD3uAch729dUG8xaaBBFGaEa0wdD2VkYLFHwlqosEj/jT66AzcreRDSgV7ehnNLBW2WR5jIXwGKjVdTLg==", - "dev": true, - "requires": 
{ - "pify": "^2.2.0" - } - }, - "exit-hook": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/exit-hook/-/exit-hook-1.1.1.tgz", - "integrity": "sha1-8FyiM7SMBdVP/wd2XfhQfpXAL/g=", - "dev": true - }, - "extend": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/extend/-/extend-3.0.2.tgz", - "integrity": "sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==", - "dev": true - }, - "extract-zip": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-1.7.0.tgz", - "integrity": "sha512-xoh5G1W/PB0/27lXgMQyIhP5DSY/LhoCsOyZgb+6iMmRtCwVBo55uKaMoEYrDCKQhWvqEip5ZPKAc6eFNyf/MA==", - "dev": true, - "requires": { - "concat-stream": "^1.6.2", - "debug": "^2.6.9", - "mkdirp": "^0.5.4", - "yauzl": "^2.10.0" - }, - "dependencies": { - "debug": { - "version": "2.6.9", - "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", - "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", - "dev": true, - "requires": { - "ms": "2.0.0" - } - }, - "ms": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz", - "integrity": "sha1-VgiurfwAvmwpAd9fmGF4jeDVl8g=", - "dev": true - } - } - }, - "extsprintf": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/extsprintf/-/extsprintf-1.3.0.tgz", - "integrity": "sha1-lpGEQOMEGnpBT4xS48V06zw+HgU=", - "dev": true - }, - "fast-deep-equal": { - "version": "3.1.3", - "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", - "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", - "dev": true - }, - "fast-json-stable-stringify": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", - "integrity": 
"sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", - "dev": true - }, - "fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha1-JcfInLH5B3+IkbvmHY85Dq4lbx4=", - "dev": true, - "requires": { - "pend": "~1.2.0" - } - }, - "figures": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-1.7.0.tgz", - "integrity": "sha1-y+Hjr/zxzUS4DK3+0o3Hk6lwHS4=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5", - "object-assign": "^4.1.0" - } - }, - "forever-agent": { - "version": "0.6.1", - "resolved": "https://registry.npmjs.org/forever-agent/-/forever-agent-0.6.1.tgz", - "integrity": "sha1-+8cfDEGt6zf5bFd60e1C2P2sypE=", - "dev": true - }, - "form-data": { - "version": "2.3.3", - "resolved": "https://registry.npmjs.org/form-data/-/form-data-2.3.3.tgz", - "integrity": "sha512-1lLKB2Mu3aGP1Q/2eCOx0fNbRMe7XdwktwOruhfqqd0rIJWwN4Dh+E3hrPSlDCXnSR7UtZ1N38rVXm+6+MEhJQ==", - "dev": true, - "requires": { - "asynckit": "^0.4.0", - "combined-stream": "^1.0.6", - "mime-types": "^2.1.12" - } - }, - "fs-extra": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", - "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", - "dev": true, - "requires": { - "at-least-node": "^1.0.0", - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - } - }, - "fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=", - "dev": true - }, - "get-stream": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", - "integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==", - "dev": true, - "requires": { - "pump": 
"^3.0.0" - } - }, - "getos": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/getos/-/getos-3.2.1.tgz", - "integrity": "sha512-U56CfOK17OKgTVqozZjUKNdkfEv6jk5WISBJ8SHoagjE6L69zOwl3Z+O8myjY9MEW3i2HPWQBt/LTbCgcC973Q==", - "dev": true, - "requires": { - "async": "^3.2.0" - } - }, - "getpass": { - "version": "0.1.7", - "resolved": "https://registry.npmjs.org/getpass/-/getpass-0.1.7.tgz", - "integrity": "sha1-Xv+OPmhNVprkyysSgmBOi6YhSfo=", - "dev": true, - "requires": { - "assert-plus": "^1.0.0" - } - }, - "glob": { - "version": "7.1.6", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.1.6.tgz", - "integrity": "sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==", - "dev": true, - "requires": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.0.4", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - } - }, - "global-dirs": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/global-dirs/-/global-dirs-2.1.0.tgz", - "integrity": "sha512-MG6kdOUh/xBnyo9cJFeIKkLEc1AyFq42QTU4XiX51i2NEdxLxLWXIjEjmqKeSuKR7pAZjTqUVoT2b2huxVLgYQ==", - "dev": true, - "requires": { - "ini": "1.3.7" - } - }, - "graceful-fs": { - "version": "4.2.6", - "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.6.tgz", - "integrity": "sha512-nTnJ528pbqxYanhpDYsi4Rd8MAeaBA67+RZ10CM1m3bTAVFEDcd5AuA4a6W5YkGZ1iNXHzZz8T6TBKLeBuNriQ==", - "dev": true - }, - "har-schema": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/har-schema/-/har-schema-2.0.0.tgz", - "integrity": "sha1-qUwiJOvKwEeCoNkDVSHyRzW37JI=", - "dev": true - }, - "har-validator": { - "version": "5.1.5", - "resolved": "https://registry.npmjs.org/har-validator/-/har-validator-5.1.5.tgz", - "integrity": "sha512-nmT2T0lljbxdQZfspsno9hgrG3Uir6Ks5afism62poxqBM6sDnMEuPmzTq8XN0OEwqKLLdh1jQI3qyE66Nzb3w==", - "dev": true, - "requires": { - "ajv": "^6.12.3", - "har-schema": "^2.0.0" - } - }, - "has-ansi": { 
- "version": "2.0.0", - "resolved": "https://registry.npmjs.org/has-ansi/-/has-ansi-2.0.0.tgz", - "integrity": "sha1-NPUEnOHs3ysGSa8+8k5F7TVBbZE=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "has-flag": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", - "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true - }, - "http-signature": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/http-signature/-/http-signature-1.2.0.tgz", - "integrity": "sha1-muzZJRFHcvPZW2WmCruPfBj7rOE=", - "dev": true, - "requires": { - "assert-plus": "^1.0.0", - "jsprim": "^1.2.2", - "sshpk": "^1.7.0" - } - }, - "human-signals": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/human-signals/-/human-signals-1.1.1.tgz", - "integrity": "sha512-SEQu7vl8KjNL2eoGBLF3+wAjpsNfA9XMlXAYj/3EdaNfAlxKthD1xjEQfGOUhllCGGJVNY34bRr6lPINhNjyZw==", - "dev": true - }, - "indent-string": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/indent-string/-/indent-string-3.2.0.tgz", - "integrity": "sha1-Sl/W0nzDMvN+VBmlBNu4NxBckok=", - "dev": true - }, - "inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha1-Sb1jMdfQLQwJvJEKEHW6gWW1bfk=", - "dev": true, - "requires": { - "once": "^1.3.0", - "wrappy": "1" - } - }, - "inherits": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true - }, - "ini": { - "version": "1.3.7", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.7.tgz", - "integrity": "sha512-iKpRpXP+CrP2jyrxvg1kMUpXDyRUFDWurxbnVT1vQPx+Wz9uCYsMIqYuSBLV+PAaZG/d7kRLKRFc9oDMsH+mFQ==", - "dev": true - }, - "is-ci": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/is-ci/-/is-ci-2.0.0.tgz", - "integrity": "sha512-YfJT7rkpQB0updsdHLGWrvhBJfcfzNNawYDNIyQXJz0IViGf75O8EBPKSdvw2rF+LGCsX4FZ8tcr3b19LcZq4w==", - "dev": true, - "requires": { - "ci-info": "^2.0.0" - } - }, - "is-fullwidth-code-point": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", - "integrity": "sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==", - "dev": true - }, - "is-installed-globally": { - "version": "0.3.2", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", - "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", - "dev": true, - "requires": { - "global-dirs": "^2.0.1", - "is-path-inside": "^3.0.1" - } - }, - "is-observable": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-observable/-/is-observable-1.1.0.tgz", - "integrity": "sha512-NqCa4Sa2d+u7BWc6CukaObG3Fh+CU9bvixbpcXYhy2VvYS7vVGIdAgnIS5Ks3A/cqk4rebLJ9s8zBstT2aKnIA==", - "dev": true, - "requires": { - "symbol-observable": "^1.1.0" - } - }, - "is-path-inside": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", - "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", - "dev": true - }, - "is-promise": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/is-promise/-/is-promise-2.2.2.tgz", - "integrity": "sha512-+lP4/6lKUBfQjZ2pdxThZvLUAafmZb8OAxFb8XXtiQmS35INgr85hdOGoEs124ez1FCnZJt6jau/T+alh58QFQ==", - "dev": true - }, - "is-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-2.0.0.tgz", - "integrity": "sha512-XCoy+WlUr7d1+Z8GgSuXmpuUFC9fOhRXglJMx+dwLKTkL44Cjd4W1Z5P+BQZpr+cR93aGP4S/s7Ftw6Nd/kiEw==", - "dev": true - }, - "is-typedarray": { - "version": "1.0.0", 
- "resolved": "https://registry.npmjs.org/is-typedarray/-/is-typedarray-1.0.0.tgz", - "integrity": "sha1-5HnICFjfDBsR3dppQPlgEfzaSpo=", - "dev": true - }, - "is-unicode-supported": { - "version": "0.1.0", - "resolved": "https://registry.npmjs.org/is-unicode-supported/-/is-unicode-supported-0.1.0.tgz", - "integrity": "sha512-knxG2q4UC3u8stRGyAVJCOdxFmv5DZiRcdlIaAQXAbSfJya+OhopNotLQrstBhququ4ZpuKbDc/8S6mgXgPFPw==", - "dev": true - }, - "isarray": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-1.0.0.tgz", - "integrity": "sha1-u5NdSFgsuhaMBoNJV6VKPgcSTxE=", - "dev": true - }, - "isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha1-6PvzdNxVb/iUehDcsFctYz8s+hA=", - "dev": true - }, - "isstream": { - "version": "0.1.2", - "resolved": "https://registry.npmjs.org/isstream/-/isstream-0.1.2.tgz", - "integrity": "sha1-R+Y/evVa+m+S4VAOaQ64uFKcCZo=", - "dev": true - }, - "js-yaml": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.0.0.tgz", - "integrity": "sha512-pqon0s+4ScYUvX30wxQi3PogGFAlUyH0awepWvwkj4jD4v+ova3RiYw8bmA6x2rDrEaj8i/oWKoRxpVNW+Re8Q==", - "dev": true, - "requires": { - "argparse": "^2.0.1" - } - }, - "jsbn": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/jsbn/-/jsbn-0.1.1.tgz", - "integrity": "sha1-peZUwuWi3rXyAdls77yoDA7y9RM=", - "dev": true - }, - "json-schema": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.2.3.tgz", - "integrity": "sha1-tIDIkuWaLwWVTOcnvT8qTogvnhM=", - "dev": true - }, - "json-schema-traverse": { - "version": "0.4.1", - "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", - "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", - "dev": true - }, - "json-stringify-safe": { - "version": "5.0.1", - "resolved": 
"https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz", - "integrity": "sha1-Epai1Y/UXxmg9s4B1lcB4sc1tus=", - "dev": true - }, - "jsonfile": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", - "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", - "dev": true, - "requires": { - "graceful-fs": "^4.1.6", - "universalify": "^2.0.0" - } - }, - "jsprim": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", - "integrity": "sha1-MT5mvB5cwG5Di8G3SZwuXFastqI=", - "dev": true, - "requires": { - "assert-plus": "1.0.0", - "extsprintf": "1.3.0", - "json-schema": "0.2.3", - "verror": "1.10.0" - } - }, - "lazy-ass": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/lazy-ass/-/lazy-ass-1.6.0.tgz", - "integrity": "sha1-eZllXoZGwX8In90YfRUNMyTVRRM=", - "dev": true - }, - "listr": { - "version": "0.14.3", - "resolved": "https://registry.npmjs.org/listr/-/listr-0.14.3.tgz", - "integrity": "sha512-RmAl7su35BFd/xoMamRjpIE4j3v+L28o8CT5YhAXQJm1fD+1l9ngXY8JAQRJ+tFK2i5njvi0iRUKV09vPwA0iA==", - "dev": true, - "requires": { - "@samverschueren/stream-to-observable": "^0.3.0", - "is-observable": "^1.1.0", - "is-promise": "^2.1.0", - "is-stream": "^1.1.0", - "listr-silent-renderer": "^1.1.1", - "listr-update-renderer": "^0.5.0", - "listr-verbose-renderer": "^0.5.0", - "p-map": "^2.0.0", - "rxjs": "^6.3.3" - }, - "dependencies": { - "is-stream": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/is-stream/-/is-stream-1.1.0.tgz", - "integrity": "sha1-EtSj3U5o4Lec6428hBc66A2RykQ=", - "dev": true - } - } - }, - "listr-silent-renderer": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/listr-silent-renderer/-/listr-silent-renderer-1.1.1.tgz", - "integrity": "sha1-kktaN1cVN3C/Go4/v3S4u/P5JC4=", - "dev": true - }, - "listr-update-renderer": { - "version": "0.5.0", - "resolved": 
"https://registry.npmjs.org/listr-update-renderer/-/listr-update-renderer-0.5.0.tgz", - "integrity": "sha512-tKRsZpKz8GSGqoI/+caPmfrypiaq+OQCbd+CovEC24uk1h952lVj5sC7SqyFUm+OaJ5HN/a1YLt5cit2FMNsFA==", - "dev": true, - "requires": { - "chalk": "^1.1.3", - "cli-truncate": "^0.2.1", - "elegant-spinner": "^1.0.1", - "figures": "^1.7.0", - "indent-string": "^3.0.0", - "log-symbols": "^1.0.2", - "log-update": "^2.3.0", - "strip-ansi": "^3.0.1" - }, - "dependencies": { - "ansi-styles": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-2.2.1.tgz", - "integrity": "sha1-tDLdM1i2NM914eRmQ2gkBTPB3b4=", - "dev": true - }, - "chalk": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-1.1.3.tgz", - "integrity": "sha1-qBFcVeSnAv5NFQq9OHKCKn4J/Jg=", - "dev": true, - "requires": { - "ansi-styles": "^2.2.1", - "escape-string-regexp": "^1.0.2", - "has-ansi": "^2.0.0", - "strip-ansi": "^3.0.0", - "supports-color": "^2.0.0" - } - }, - "log-symbols": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-1.0.2.tgz", - "integrity": "sha1-N2/3tY6jCGoPCfrMdGF+ylAeGhg=", - "dev": true, - "requires": { - "chalk": "^1.0.0" - } - }, - "supports-color": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-2.0.0.tgz", - "integrity": "sha1-U10EXOa2Nj+kARcIRimZXp3zJMc=", - "dev": true - } - } - }, - "listr-verbose-renderer": { - "version": "0.5.0", - "resolved": "https://registry.npmjs.org/listr-verbose-renderer/-/listr-verbose-renderer-0.5.0.tgz", - "integrity": "sha512-04PDPqSlsqIOaaaGZ+41vq5FejI9auqTInicFRndCBgE3bXG8D6W1I+mWhk+1nqbHmyhla/6BUrd5OSiHwKRXw==", - "dev": true, - "requires": { - "chalk": "^2.4.1", - "cli-cursor": "^2.1.0", - "date-fns": "^1.27.2", - "figures": "^2.0.0" - }, - "dependencies": { - "ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": 
"sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "requires": { - "color-convert": "^1.9.0" - } - }, - "chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "requires": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - } - }, - "cli-cursor": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", - "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", - "dev": true, - "requires": { - "restore-cursor": "^2.0.0" - } - }, - "color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "requires": { - "color-name": "1.1.3" - } - }, - "color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha1-p9BVi9icQveV3UIyj3QIMcpTvCU=", - "dev": true - }, - "figures": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-2.0.0.tgz", - "integrity": "sha1-OrGi0qYsi/tDGgyUy3l6L84nyWI=", - "dev": true, - "requires": { - "escape-string-regexp": "^1.0.5" - } - }, - "has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha1-tdRU3CGZriJWmfNGfloH87lVuv0=", - "dev": true - }, - "mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true - }, - "onetime": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", - "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, - "requires": { - "mimic-fn": "^1.0.0" - } - }, - "restore-cursor": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", - "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, - "requires": { - "onetime": "^2.0.0", - "signal-exit": "^3.0.2" - } - }, - "supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "requires": { - "has-flag": "^3.0.0" - } - } - } - }, - "lodash": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", - "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", - "dev": true - }, - "lodash.once": { - "version": "4.1.1", - "resolved": "https://registry.npmjs.org/lodash.once/-/lodash.once-4.1.1.tgz", - "integrity": "sha1-DdOXEhPHxW34gJd9UEyI+0cal6w=", - "dev": true - }, - "log-symbols": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/log-symbols/-/log-symbols-4.1.0.tgz", - "integrity": "sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg==", - "dev": true, - "requires": { - "chalk": "^4.1.0", - "is-unicode-supported": "^0.1.0" - } - }, - "log-update": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/log-update/-/log-update-2.3.0.tgz", - "integrity": "sha1-iDKP19HOeTiykoN0bwsbwSayRwg=", - "dev": true, - "requires": { - "ansi-escapes": "^3.0.0", - "cli-cursor": "^2.0.0", - "wrap-ansi": "^3.0.1" - }, - "dependencies": { - "cli-cursor": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cli-cursor/-/cli-cursor-2.1.0.tgz", - "integrity": "sha1-s12sN2R5+sw+lHR9QdDQ9SOP/LU=", 
- "dev": true, - "requires": { - "restore-cursor": "^2.0.0" - } - }, - "mimic-fn": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-1.2.0.tgz", - "integrity": "sha512-jf84uxzwiuiIVKiOLpfYk7N46TSy8ubTonmneY9vrpHNAnp0QBt2BxWV9dO3/j+BoVAb+a5G6YDPW3M5HOdMWQ==", - "dev": true - }, - "onetime": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-2.0.1.tgz", - "integrity": "sha1-BnQoIw/WdEOyeUsiu6UotoZ5YtQ=", - "dev": true, - "requires": { - "mimic-fn": "^1.0.0" - } - }, - "restore-cursor": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-2.0.0.tgz", - "integrity": "sha1-n37ih/gv0ybU/RYpI9YhKe7g368=", - "dev": true, - "requires": { - "onetime": "^2.0.0", - "signal-exit": "^3.0.2" - } - } - } - }, - "merge-stream": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/merge-stream/-/merge-stream-2.0.0.tgz", - "integrity": "sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==", - "dev": true - }, - "mime-db": { - "version": "1.46.0", - "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.46.0.tgz", - "integrity": "sha512-svXaP8UQRZ5K7or+ZmfNhg2xX3yKDMUzqadsSqi4NCH/KomcH75MAMYAGVlvXn4+b/xOPhS3I2uHKRUzvjY7BQ==", - "dev": true - }, - "mime-types": { - "version": "2.1.29", - "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.29.tgz", - "integrity": "sha512-Y/jMt/S5sR9OaqteJtslsFZKWOIIqMACsJSiHghlCAyhf7jfVYjKBmLiX8OgpWeW+fjJ2b+Az69aPFPkUOY6xQ==", - "dev": true, - "requires": { - "mime-db": "1.46.0" - } - }, - "mimic-fn": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/mimic-fn/-/mimic-fn-2.1.0.tgz", - "integrity": "sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==", - "dev": true - }, - "minimatch": { - "version": "3.0.4", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.0.4.tgz", - "integrity": 
"sha512-yJHVQEhyqPLUTgt9B83PXu6W3rx4MvvHvSUvToogpwoGDOUQ+yDrR0HRot+yOCdCO7u4hX3pWft6kWBBcqh0UA==", - "dev": true, - "requires": { - "brace-expansion": "^1.1.7" - } - }, - "minimist": { - "version": "1.2.6", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.6.tgz", - "integrity": "sha512-Jsjnk4bw3YJqYzbdyBiNsPWHPfO++UGG749Cxs6peCu5Xg4nrena6OVxOYxrQTqww0Jmwt+Ref8rggumkTLz9Q==", - "dev": true - }, - "mkdirp": { - "version": "0.5.5", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", - "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", - "dev": true, - "requires": { - "minimist": "^1.2.5" - } - }, - "moment": { - "version": "2.29.2", - "resolved": "https://registry.npmjs.org/moment/-/moment-2.29.2.tgz", - "integrity": "sha512-UgzG4rvxYpN15jgCmVJwac49h9ly9NurikMWGPdVxm8GZD6XjkKPxDTjQQ43gtGgnV3X0cAyWDdP2Wexoquifg==", - "dev": true - }, - "ms": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", - "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", - "dev": true - }, - "npm-run-path": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/npm-run-path/-/npm-run-path-4.0.1.tgz", - "integrity": "sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==", - "dev": true, - "requires": { - "path-key": "^3.0.0" - } - }, - "number-is-nan": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/number-is-nan/-/number-is-nan-1.0.1.tgz", - "integrity": "sha1-CXtgK1NCKlIsGvuHkDGDNpQaAR0=", - "dev": true - }, - "oauth-sign": { - "version": "0.9.0", - "resolved": "https://registry.npmjs.org/oauth-sign/-/oauth-sign-0.9.0.tgz", - "integrity": "sha512-fexhUFFPTGV8ybAtSIGbV6gOkSv8UtRbDBnAyLQw4QPKkgNlsH2ByPGtMUqdWkos6YCRmAqViwgZrJc/mRDzZQ==", - "dev": true - }, - "object-assign": { - "version": "4.1.1", - "resolved": 
"https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", - "integrity": "sha1-IQmtx5ZYh8/AXLvUQsrIv7s2CGM=", - "dev": true - }, - "once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": "sha1-WDsap3WWHUsROsF9nFC6753Xa9E=", - "dev": true, - "requires": { - "wrappy": "1" - } - }, - "onetime": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-5.1.2.tgz", - "integrity": "sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==", - "dev": true, - "requires": { - "mimic-fn": "^2.1.0" - } - }, - "ospath": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/ospath/-/ospath-1.2.2.tgz", - "integrity": "sha1-EnZjl3Sj+O8lcvf+QoDg6kVQwHs=", - "dev": true - }, - "p-map": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/p-map/-/p-map-2.1.0.tgz", - "integrity": "sha512-y3b8Kpd8OAN444hxfBbFfj1FY/RjtTd8tzYwhUqNYXx0fXx2iX4maP4Qr6qhIKbQXI02wTLAda4fYUbDagTUFw==", - "dev": true - }, - "path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha1-F0uSaHNVNP+8es5r9TpanhtcX18=", - "dev": true - }, - "path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true - }, - "pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha1-elfrVQpng/kRUzH89GY9XI4AelA=", - "dev": true - }, - "performance-now": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/performance-now/-/performance-now-2.1.0.tgz", - "integrity": "sha1-Ywn04OX6kT7BxpMHrjZLSzd8nns=", - "dev": true - }, - "pify": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/pify/-/pify-2.3.0.tgz", - "integrity": 
"sha1-7RQaasBDqEnqWISY59yosVMw6Qw=", - "dev": true - }, - "pretty-bytes": { - "version": "5.6.0", - "resolved": "https://registry.npmjs.org/pretty-bytes/-/pretty-bytes-5.6.0.tgz", - "integrity": "sha512-FFw039TmrBqFK8ma/7OL3sDz/VytdtJr044/QUJtH0wK9lb9jLq9tJyIxUwtQJHwar2BqtiA4iCWSwo9JLkzFg==", - "dev": true - }, - "process-nextick-args": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/process-nextick-args/-/process-nextick-args-2.0.1.tgz", - "integrity": "sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==", - "dev": true - }, - "psl": { - "version": "1.8.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", - "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==", - "dev": true - }, - "pump": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", - "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", - "dev": true, - "requires": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, - "punycode": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", - "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==", - "dev": true - }, - "qs": { - "version": "6.5.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.5.2.tgz", - "integrity": "sha512-N5ZAX4/LxJmF+7wN74pUD6qAh9/wnvdQcjq9TZjevvXzSUo7bfmw91saqMjzGS2xq91/odN2dW/WOl7qQHNDGA==", - "dev": true - }, - "querystring": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/querystring/-/querystring-0.2.0.tgz", - "integrity": "sha1-sgmEkgO7Jd+CDadW50cAWHhSFiA=", - "dev": true - }, - "ramda": { - "version": "0.27.1", - "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.27.1.tgz", - "integrity": 
"sha512-PgIdVpn5y5Yns8vqb8FzBUEYn98V3xcPgawAkkgj0YJ0qDsnHCiNmZYfOGMgOvoB0eWFLpYbhxUR3mxfDIMvpw==", - "dev": true - }, - "readable-stream": { - "version": "2.3.7", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-2.3.7.tgz", - "integrity": "sha512-Ebho8K4jIbHAxnuxi7o42OrZgF/ZTNcsZj6nRKyUmkhLFq8CHItp/fy6hQZuZmP/n3yZ9VBUbp4zz/mX8hmYPw==", - "dev": true, - "requires": { - "core-util-is": "~1.0.0", - "inherits": "~2.0.3", - "isarray": "~1.0.0", - "process-nextick-args": "~2.0.0", - "safe-buffer": "~5.1.1", - "string_decoder": "~1.1.1", - "util-deprecate": "~1.0.1" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - } - } - }, - "request-progress": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/request-progress/-/request-progress-3.0.0.tgz", - "integrity": "sha1-TKdUCBx/7GP1BeT6qCWqBs1mnb4=", - "dev": true, - "requires": { - "throttleit": "^1.0.0" - } - }, - "restore-cursor": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/restore-cursor/-/restore-cursor-1.0.1.tgz", - "integrity": "sha1-NGYfRohjJ/7SmRR5FSJS35LapUE=", - "dev": true, - "requires": { - "exit-hook": "^1.0.0", - "onetime": "^1.0.0" - }, - "dependencies": { - "onetime": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/onetime/-/onetime-1.1.0.tgz", - "integrity": "sha1-ofeDj4MUxRbwXs78vEzP4EtO14k=", - "dev": true - } - } - }, - "rimraf": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", - "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", - "dev": true, - "requires": { - "glob": "^7.1.3" - } - }, - "rxjs": { - "version": "6.6.6", - "resolved": "https://registry.npmjs.org/rxjs/-/rxjs-6.6.6.tgz", - "integrity": 
"sha512-/oTwee4N4iWzAMAL9xdGKjkEHmIwupR3oXbQjCKywF1BeFohswF3vZdogbmEF6pZkOsXTzWkrZszrWpQTByYVg==", - "dev": true, - "requires": { - "tslib": "^1.9.0" - } - }, - "safe-buffer": { - "version": "5.2.1", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", - "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", - "dev": true - }, - "safer-buffer": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/safer-buffer/-/safer-buffer-2.1.2.tgz", - "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", - "dev": true - }, - "shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "requires": { - "shebang-regex": "^3.0.0" - } - }, - "shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true - }, - "signal-exit": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", - "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==", - "dev": true - }, - "slice-ansi": { - "version": "0.0.4", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-0.0.4.tgz", - "integrity": "sha1-7b+JA/ZvfOL46v1s7tZeJkyDGzU=", - "dev": true - }, - "sshpk": { - "version": "1.16.1", - "resolved": "https://registry.npmjs.org/sshpk/-/sshpk-1.16.1.tgz", - "integrity": "sha512-HXXqVUq7+pcKeLqqZj6mHFUMvXtOJt1uoUx09pFW6011inTMxqI8BA8PM95myrIyyKwdnzjdFjLiE6KBPVtJIg==", - "dev": true, - "requires": { - "asn1": "~0.2.3", - "assert-plus": "^1.0.0", - 
"bcrypt-pbkdf": "^1.0.0", - "dashdash": "^1.12.0", - "ecc-jsbn": "~0.1.1", - "getpass": "^0.1.1", - "jsbn": "~0.1.0", - "safer-buffer": "^2.0.2", - "tweetnacl": "~0.14.0" - } - }, - "string_decoder": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.1.1.tgz", - "integrity": "sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==", - "dev": true, - "requires": { - "safe-buffer": "~5.1.0" - }, - "dependencies": { - "safe-buffer": { - "version": "5.1.2", - "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.1.2.tgz", - "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", - "dev": true - } - } - }, - "string-width": { - "version": "4.2.2", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.2.tgz", - "integrity": "sha512-XBJbT3N4JhVumXE0eoLU9DCjcaF92KLNqTmFCnG1pf8duUxFGwtP6AD6nkjw9a3IdiRtL3E2w3JDiE/xi3vOeA==", - "dev": true, - "requires": { - "emoji-regex": "^8.0.0", - "is-fullwidth-code-point": "^3.0.0", - "strip-ansi": "^6.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.0.tgz", - "integrity": "sha512-bY6fj56OUQ0hU1KjFNDQuJFezqKdrAyFdIevADiqrWHwSlbmBNMHp5ak2f40Pm8JTFyM2mqxkG6ngkHO11f/lg==", - "dev": true - }, - "strip-ansi": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.0.tgz", - "integrity": "sha512-AuvKTrTfQNYNIctbR1K/YGTR1756GycPsg7b9bdV9Duqur4gv6aKqHXah67Z8ImS7WEz5QVcOtlfW2rZEugt6w==", - "dev": true, - "requires": { - "ansi-regex": "^5.0.0" - } - } - } - }, - "strip-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-3.0.1.tgz", - "integrity": "sha1-ajhfuIU9lS1f8F0Oiq+UJ43GPc8=", - "dev": true, - "requires": { - "ansi-regex": "^2.0.0" - } - }, - "strip-final-newline": { - "version": "2.0.0", - 
"resolved": "https://registry.npmjs.org/strip-final-newline/-/strip-final-newline-2.0.0.tgz", - "integrity": "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==", - "dev": true - }, - "supports-color": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", - "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, - "requires": { - "has-flag": "^4.0.0" - } - }, - "symbol-observable": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/symbol-observable/-/symbol-observable-1.2.0.tgz", - "integrity": "sha512-e900nM8RRtGhlV36KGEU9k65K3mPb1WV70OdjfxlG2EAuM1noi/E/BaW/uMhL7bPEssK8QV57vN3esixjUvcXQ==", - "dev": true - }, - "throttleit": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/throttleit/-/throttleit-1.0.0.tgz", - "integrity": "sha1-nnhYNtr0Z0MUWlmEtiaNgoUorGw=", - "dev": true - }, - "tmp": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.1.tgz", - "integrity": "sha512-76SUhtfqR2Ijn+xllcI5P1oyannHNHByD80W1q447gU3mp9G9PSpGdWmjUOHRDPiHYacIk66W7ubDTuPF3BEtQ==", - "dev": true, - "requires": { - "rimraf": "^3.0.0" - } - }, - "tough-cookie": { - "version": "2.5.0", - "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", - "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", - "dev": true, - "requires": { - "psl": "^1.1.28", - "punycode": "^2.1.1" - } - }, - "tslib": { - "version": "1.14.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.14.1.tgz", - "integrity": "sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==", - "dev": true - }, - "tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": 
"sha1-J6XeoGs2sEoKmWZ3SykIaPD8QP0=", - "dev": true, - "requires": { - "safe-buffer": "^5.0.1" - } - }, - "tweetnacl": { - "version": "0.14.5", - "resolved": "https://registry.npmjs.org/tweetnacl/-/tweetnacl-0.14.5.tgz", - "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=", - "dev": true - }, - "typedarray": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/typedarray/-/typedarray-0.0.6.tgz", - "integrity": "sha1-hnrHTjhkGHsdPUfZlqeOxciDB3c=", - "dev": true - }, - "universalify": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", - "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", - "dev": true - }, - "untildify": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/untildify/-/untildify-4.0.0.tgz", - "integrity": "sha512-KK8xQ1mkzZeg9inewmFVDNkg3l5LUhoq9kN6iWYB/CC9YMG8HA+c1Q8HwDe6dEX7kErrEVNVBO3fWsVq5iDgtw==", - "dev": true - }, - "uri-js": { - "version": "4.4.1", - "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", - "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", - "dev": true, - "requires": { - "punycode": "^2.1.0" - } - }, - "url": { - "version": "0.11.0", - "resolved": "https://registry.npmjs.org/url/-/url-0.11.0.tgz", - "integrity": "sha1-ODjpfPxgUh63PFJajlW/3Z4uKPE=", - "dev": true, - "requires": { - "punycode": "1.3.2", - "querystring": "0.2.0" - }, - "dependencies": { - "punycode": { - "version": "1.3.2", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.3.2.tgz", - "integrity": "sha1-llOgNvt8HuQjQvIyXM7v6jkmxI0=", - "dev": true - } - } - }, - "util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha1-RQ1Nyfpw3nMnYvvS1KKJgUGaDM8=", - "dev": true - }, - "uuid": { - "version": "3.4.0", - "resolved": 
"https://registry.npmjs.org/uuid/-/uuid-3.4.0.tgz", - "integrity": "sha512-HjSDRw6gZE5JMggctHBcjVak08+KEVhSIiDzFnT9S9aegmp85S/bReBVTb4QTFaRNptJ9kuYaNhnbNEOkbKb/A==", - "dev": true - }, - "verror": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/verror/-/verror-1.10.0.tgz", - "integrity": "sha1-OhBcoXBTr1XW4nDB+CiGguGNpAA=", - "dev": true, - "requires": { - "assert-plus": "^1.0.0", - "core-util-is": "1.0.2", - "extsprintf": "^1.2.0" - } - }, - "which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "requires": { - "isexe": "^2.0.0" - } - }, - "wrap-ansi": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-3.0.1.tgz", - "integrity": "sha1-KIoE2H7aXChuBg3+jxNc6NAH+Lo=", - "dev": true, - "requires": { - "string-width": "^2.1.1", - "strip-ansi": "^4.0.0" - }, - "dependencies": { - "ansi-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-3.0.0.tgz", - "integrity": "sha1-7QMXwyIGT3lGbAKWa922Bas32Zg=", - "dev": true - }, - "is-fullwidth-code-point": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-2.0.0.tgz", - "integrity": "sha1-o7MKXE8ZkYMWeqq5O+764937ZU8=", - "dev": true - }, - "string-width": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-2.1.1.tgz", - "integrity": "sha512-nOqH59deCq9SRHlxq1Aw85Jnt4w6KvLKqWVik6oA9ZklXLNIOlqg4F2yrT1MVaTjAqvVwdfeZ7w7aCvJD7ugkw==", - "dev": true, - "requires": { - "is-fullwidth-code-point": "^2.0.0", - "strip-ansi": "^4.0.0" - } - }, - "strip-ansi": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-4.0.0.tgz", - "integrity": "sha1-qEeQIusaw2iocTibY1JixQXuNo8=", - "dev": true, - "requires": { - "ansi-regex": 
"^3.0.0" - } - } - } - }, - "wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=", - "dev": true - }, - "yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha1-x+sXyT4RLLEIb6bY5R+wZnt5pfk=", - "dev": true, - "requires": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - } - } -} diff --git a/tests/tests_e2e/package.json b/tests/tests_e2e/package.json deleted file mode 100644 index b63ee860b6..0000000000 --- a/tests/tests_e2e/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "name": "e2etest", - "version": "1.0.0", - "description": "Cypress e2e tests", - "main": "index.js", - "scripts": { - "test": "cypress run", - "cypress:open": "cypress open" - }, - "author": "", - "license": "ISC", - "devDependencies": { - "cypress": "^6.8.0", - "js-yaml": "^4.0.0", - "lodash": "^4.17.21" - } -} diff --git a/tests/tests_e2e/playwright/README.md b/tests/tests_e2e/playwright/README.md index 99a285f3a6..c328681273 100644 --- a/tests/tests_e2e/playwright/README.md +++ b/tests/tests_e2e/playwright/README.md @@ -1,199 +1,190 @@ -# Nebari integration testing with Playwright +# Nebari Integration Testing with Playwright -## How does it work? +## How Does It Work? -Playwright manages interactions with any website. We are using it to interact -with a deployed Nebari instance and test the various integrations that are -included. +Playwright manages interactions with websites, and we use it to interact with a deployed Nebari instance and test various integrations. -For our test suite, we utilize Playwright's synchronous API. The first task -is to launch the web browser you'd like to test in. Options in our test suite -are `chromium`, `webkit`, and `firefox`. Playwright uses browser contexts to -achieve test isolation. 
The context can either be created by default or -manually (for the purposes of generating multiple contexts per test in the case -of admin vs user testing). Next the page on the browser is created. For all -tests this starts as a blank page, then during the test, we navigate to a given -url. This is all achieved in the `setup` method of the `Navigator` class. +We use Playwright's synchronous API for our test suite. The first task is to launch the web browser of your choice: `chromium`, `webkit`, or `firefox`. Playwright uses browser contexts for test isolation, which can be created by default or manually for scenarios like admin vs. user testing. Each test starts with a blank page, and we navigate to a given URL during the test. This setup is managed by the `setup` method in the `Navigator` class. + +## Directory Structure + +The project directory structure is as follows: + +``` +tests +├── common +│   ├── __init__.py +│   ├── navigator.py +│   ├── handlers.py +│   ├── playwright_fixtures.py +├── ... +├── tests_e2e +│   └── playwright +│   ├── README.md +│   └── test_playwright.py +``` + +- `test_data/`: Contains test files, such as sample notebooks. +- `test_playwright.py`: The main test script that uses Playwright for integration testing. +- `navigator.py`: Contains the `NavigatorMixin` class, which manages browser + interactions and context, as well as the `LoginNavigator` class, which manages user + authentication, and the `ServerManager` class, which manages user instance spawning. +- `handlers.py`: Contains classes for handling the different levels of access to + services a User might encounter, such as Notebook, Conda-store and others. ## Setup -Install Nebari with the development requirements (which include Playwright) +1. **Install Nebari with Development Requirements** -`pip install -e ".[dev]"` + Install Nebari including development requirements (which include Playwright): -Then install playwright itself (required). 
+ ```bash + pip install -e ".[dev]" + ``` -`playwright install` +2. **Install Playwright** -> If you see the warning `BEWARE: your OS is not officially supported by Playwright; downloading fallback build., it is not critical.` Playwright will likely still work microsoft/playwright#15124 + Install Playwright: -### Create environment file + ```bash + playwright install + ``` -Create a copy of the `.env` template file + *Note:* If you see the warning `BEWARE: your OS is not officially supported by Playwright; downloading fallback build`, it is not critical. Playwright should still work (see microsoft/playwright#15124). -```bash -cd tests_e2e/playwright -cp .env.tpl .env -``` +3. **Create Environment Vars** -Fill in the newly created `.env` file with the following values: + Fill in your execution space environment with the following values: -* KEYCLOAK_USERNAME: Nebari username for username/password login OR Google email address or Google sign in -* KEYCLOAK_PASSWORD: Password associated with USERNAME -* NEBARI_FULL_URL: full url path including scheme to Nebari instance, e.g. "https://nebari.quansight.dev/" + - `KEYCLOAK_USERNAME`: Nebari username for username/password login or Google email address/Google sign-in. + - `KEYCLOAK_PASSWORD`: Password associated with `KEYCLOAK_USERNAME`. + - `NEBARI_FULL_URL`: Full URL path including scheme to the Nebari instance (e.g., "https://nebari.quansight.dev/"). 
-This user can be created with the following command (or you can use an existing non-root user): + This user can be created with the following command (or use an existing non-root user): -``` -nebari keycloak adduser --user --config -``` + ```bash + nebari keycloak adduser --user --config + ``` -## Running the Playwright tests +## Running the Playwright Tests -The playwright tests are run inside of pytest using +Playwright tests are run inside of pytest using: -```python +```bash pytest tests_e2e/playwright/test_playwright.py ``` -Videos of the test playback will be available in `$PWD/videos/`. -To see what is happening while the test is run, pass the `--headed` option to `pytest`. -You can also add the `--slowmo=$MILLI_SECONDS` option to add a delay before each action -by Playwright and thus slowing down the process. +Videos of the test playback will be available in `$PWD/videos/`. To disabled the browser +runtime preview of what is happening while the test runs, pass the `--headed` option to `pytest`. You +can also add the `--slowmo=$MILLI_SECONDS` option to introduce a delay before each +action by Playwright, thereby slowing down the process. -Another option is to run playwright methods outside of pytest. Both -`navigator.py` and `run_notebook.py` can be run as scripts. For example, +Alternatively, you can run Playwright methods outside of pytest. 
Below an example of +how to run a test, where you can interface with the Notebook handler: ```python - import os - - import dotenv - # load environment variables from .env file - dotenv.load_dotenv() - # instantiate the navigator class - nav = Navigator( - nebari_url="https://nebari.quansight.dev/", - username=os.environ["KEYCLOAK_USERNAME"], - password=os.environ["KEYCLOAK_PASSWORD"], - auth="password", - instance_name="small-instance", - headless=False, - slow_mo=100, - ) - # go through login sequence (defined by `auth` method in Navigator class) - nav.login() - # start the nebari server (defined by `instance_type` in Navigator class) - nav.start_server() - # reset the jupyterlab workspace to ensure we're starting with only the - # Launcher screen open, and we're in the root directory. - nav.reset_workspace() - # instantiate our test application - test_app = RunNotebook(navigator=nav) - # Write the sample notebook on the nebari instance - notebook_filepath_in_repo = ( - "tests_e2e/playwright/test_data/test_notebook_output.ipynb" - ) - notebook_filepath_on_nebari = "test_notebook_output.ipynb" - with open(notebook_filepath_in_repo, "r") as notebook: - test_app.nav.write_file( - filepath=notebook_filepath_on_nebari, content=notebook.read() - ) - # run a sample notebook - test_app.run_notebook( - path="nebari/tests_e2e/playwright/test_data/test_notebook_output.ipynb", - expected_output_text="success: 6", - conda_env="conda-env-default-py", - ) - # close out playwright and its associated browser handles - nav.teardown() +import os +import dotenv +from pathlib import Path + +from tests.common.navigator import ServerManager +from tests.common.handlers import Notebook + + +# Instantiate the Navigator class +nav = ServerManage( + nebari_url="https://nebari.quansight.dev/", + username=os.environ["KEYCLOAK_USERNAME"], + password=os.environ["KEYCLOAK_PASSWORD"], + auth="password", + instance_name="small-instance", + headless=False, + slow_mo=100, +) + + +notebook_manager = 
Notebook(navigator=nav)
You can wait for the page to finish loading: ```python self.page.wait_for_load_state("networkidle") ``` -or we can wait for something specific to happen with the locator itself: +Or wait for something specific to happen with the locator itself: ```python button.wait_for(timeout=3000, state="attached") ``` -Note that waiting for the page to finish loading may be deceptive inside of -Jupyterlab since things may need to load _inside_ the page, not necessarily -causing network traffic - or causing several bursts network traffic, which -would incorrectly pass the `wait_for_load_state` after the first burst. +Note that waiting for the page to finish loading may be misleading inside of JupyterLab since elements may need to load _inside_ the page or cause several bursts of network traffic. + +Playwright has a built-in auto-wait feature that waits for a timeout period for actionable items. See [Playwright Actionability](https://playwright.dev/docs/actionability). + +## Parameterized Decorators + +### Usage -Playwright has a built-in auto-wait feature which waits for a timeout period -for some actionable items. See https://playwright.dev/docs/actionability . +Parameterized decorators in your test setup allow you to run tests with different configurations or contexts. They are particularly useful for testing different scenarios, such as varying user roles or application states. -### Workflow for creating new tests +To easy the control over the initial setup of spawning the user instance and login, we +already provider two base decorators that can be used in your test: +- `server_parameterized`: Allows to login and spin a new instance of the server, based + on the provided instance type. Allows for the nav.page to be run within the JupyterLab environment. +- ` login_parameterized`: Allow login to Nebari and sets you test workspace to the main + hub, allow your tests to attest things like the launcher screen or the navbar components. 
-An example of running a new run notebook test might look like this: +For example, using parameterized decorators to test different user roles might look like this: ```python - import os - - import dotenv - # load environment variables from .env file - dotenv.load_dotenv() - # instantiate the navigator class - nav = Navigator( - nebari_url="https://nebari.quansight.dev/", - username=os.environ["KEYCLOAK_USERNAME"], - password=os.environ["KEYCLOAK_PASSWORD"], - auth="password", - instance_name="small-instance", - headless=False, - slow_mo=100, - ) - # go through login sequence (defined by `auth` method in Navigator class) - nav.login() - # start the nebari server (defined by `instance_type` in Navigator class) - nav.start_server() - # reset the jupyterlab workspace to ensure we're starting with only the - # Launcher screen open, and we're in the root directory. - nav.reset_workspace() - # instantiate our test application - test_app = RunNotebook(navigator=nav) - # Write the sample notebook on the nebari instance - notebook_filepath_in_repo = ( - "tests_e2e/playwright/test_data/test_notebook_output.ipynb" - ) - notebook_filepath_on_nebari = "test_notebook_output.ipynb" - with open(notebook_filepath_in_repo, "r") as notebook: - test_app.nav.write_file( - filepath=notebook_filepath_on_nebari, content=notebook.read() - ) - # run a sample notebook - test_app.run_notebook( - path="nebari/tests_e2e/playwright/test_data/test_notebook_output.ipynb", - expected_output_text="success: 6", - conda_env="conda-env-default-py", - ) - # close out playwright and its associated browser handles - nav.teardown() +@pytest.mark.parametrize("is_admin", [False]) +@login_parameterized() +def test_role_button(navigator, is_admin): + _ = navigator.page.get_by_role("button", name="Admin Button").is_visible() + assert _ == is_admin + # Perform tests specific to the user role... 
In the example above, we used the `login_parameterized` decorator, which logs in as a user
+(based on KEYCLOAK_USERNAME and KEYCLOAK_PASSWORD) and lets you work within the logged-in workspace;
+we then check for the presence of the "Admin Button" on the page (which does not exist).
+
+If your test suite requires a more complex sequence of actions or special
+parsing of the contents present on each page, you can create
+your own handler to execute the auxiliary actions while the test is running. Check
+`handlers.py` for examples of how this is done.
navbar_items_names == services + + +@pytest.mark.parametrize( + "expected_outputs", + [ + (["success: 6"]), + ], +) +@server_parameterized(instance_name="small-instance") +def test_notebook(navigator, test_data_root, expected_outputs): + notebook_manager = Notebook(navigator=navigator) + + notebook_manager.reset_workspace() -@navigator_parameterized(instance_name="small-instance") -def test_notebook(navigator, test_data_root): - test_app = Notebook(navigator=navigator) notebook_name = "test_notebook_output.ipynb" notebook_path = test_data_root / notebook_name + assert notebook_path.exists() + with open(notebook_path, "r") as notebook: - test_app.nav.write_file(filepath=notebook_name, content=notebook.read()) - test_app.run( - path=notebook_name, - expected_outputs=["success: 6"], - conda_env="default *", - timeout=500, + notebook_manager.write_file(filepath=notebook_name, content=notebook.read()) + + outputs = notebook_manager.run_notebook( + notebook_name=notebook_name, kernel="default" ) + + assert outputs == expected_outputs + + # Clean up + notebook_manager.reset_workspace() + + +@pytest.mark.parametrize( + "namespaces", + [ + (["analyst", "developer", "global", "nebari-git", "users"]), + ], +) +@server_parameterized(instance_name="small-instance") +def test_conda_store_ui(navigator, namespaces): + conda_store = CondaStore(navigator=navigator) + + conda_store.reset_workspace() + + conda_store.conda_store_ui() + + shown_namespaces = conda_store._get_shown_namespaces() + shown_namespaces.sort() + + namespaces.append(navigator.username) + namespaces.sort() + + assert shown_namespaces == namespaces + # Clean up + conda_store.reset_workspace() diff --git a/tests/tests_unit/cli_validate/min.happy.jupyterlab.gallery_settings.yaml b/tests/tests_unit/cli_validate/min.happy.jupyterlab.gallery_settings.yaml new file mode 100644 index 0000000000..02df930fdd --- /dev/null +++ b/tests/tests_unit/cli_validate/min.happy.jupyterlab.gallery_settings.yaml @@ -0,0 +1,10 @@ 
+project_name: test +jupyterlab: + gallery_settings: + title: Example repositories + destination: examples + exhibits: + - title: Nebari + git: https://github.com/nebari-dev/nebari.git + homepage: https://github.com/nebari-dev/nebari + description: 🪴 Nebari - your open source data science platform diff --git a/tests/tests_unit/conftest.py b/tests/tests_unit/conftest.py index d78dfdf1ec..ce60e44799 100644 --- a/tests/tests_unit/conftest.py +++ b/tests/tests_unit/conftest.py @@ -125,7 +125,7 @@ def _mock_return_value(return_value): "gcp.nebari.dev", schema.ProviderEnum.gcp, GCP_DEFAULT_REGION, - CiEnum.github_actions, + CiEnum.gitlab_ci, AuthenticationEnum.password, ), ( @@ -154,6 +154,11 @@ def nebari_config_options(request) -> schema.Main: auth_provider, ) = request.param + if ci_provider == CiEnum.github_actions: + repo = DEFAULT_GH_REPO + else: + repo = None + return dict( project_name=project, namespace=namespace, @@ -162,7 +167,7 @@ def nebari_config_options(request) -> schema.Main: region=region, ci_provider=ci_provider, auth_provider=auth_provider, - repository=DEFAULT_GH_REPO, + repository=repo, repository_auto_provision=False, auth_auto_provision=False, terraform_state=DEFAULT_TERRAFORM_STATE, diff --git a/tests/tests_unit/test_cli_init.py b/tests/tests_unit/test_cli_init.py index 0cd0fe03d2..9afab5ddc5 100644 --- a/tests/tests_unit/test_cli_init.py +++ b/tests/tests_unit/test_cli_init.py @@ -51,6 +51,8 @@ (["--ssl-cert-email"], 2, ["requires an argument"]), (["--output"], 2, ["requires an argument"]), (["-o"], 2, ["requires an argument"]), + (["--explicit"], 2, ["Missing option"]), + (["-e"], 2, ["Missing option"]), ], ) def test_cli_init_stdout(args: List[str], exit_code: int, content: List[str]): @@ -90,20 +92,22 @@ def generate_test_data_test_cli_init_happy_path(): ) in get_kubernetes_versions(provider) + [ "latest" ]: - test_data.append( - ( - provider, - region, - project_name, - domain_name, - namespace, - auth_provider, - ci_provider, - 
terraform_state, - email, - kubernetes_version, + for explicit in [True, False]: + test_data.append( + ( + provider, + region, + project_name, + domain_name, + namespace, + auth_provider, + ci_provider, + terraform_state, + email, + kubernetes_version, + explicit, + ) ) - ) keys = [ "provider", @@ -116,6 +120,7 @@ def generate_test_data_test_cli_init_happy_path(): "terraform_state", "email", "kubernetes_version", + "explicit", ] return {"keys": keys, "test_data": test_data} @@ -131,6 +136,7 @@ def test_cli_init_happy_path( terraform_state: str, email: str, kubernetes_version: str, + explicit: bool, ): app = create_cli() args = [ @@ -159,6 +165,8 @@ def test_cli_init_happy_path( "--region", region, ] + if explicit: + args += ["--explicit"] expected_yaml = f""" provider: {provider} diff --git a/tests/tests_unit/test_cli_init_repository.py b/tests/tests_unit/test_cli_init_repository.py index 6bc0d4e7d4..34746722ba 100644 --- a/tests/tests_unit/test_cli_init_repository.py +++ b/tests/tests_unit/test_cli_init_repository.py @@ -174,21 +174,19 @@ def test_cli_init_error_repository_missing_env(monkeypatch: pytest.MonkeyPatch): assert tmp_file.exists() is False -def test_cli_init_error_invalid_repo(monkeypatch: pytest.MonkeyPatch): - monkeypatch.setenv("GITHUB_USERNAME", TEST_GITHUB_USERNAME) - monkeypatch.setenv("GITHUB_TOKEN", TEST_GITHUB_TOKEN) - +@pytest.mark.parametrize( + "url", + [ + "https://github.com", + "http://github.com/user/repo", + "https://github.com/user/" "github.com/user/repo", + "https://notgithub.com/user/repository", + ], +) +def test_cli_init_error_invalid_repo(url): app = create_cli() - args = [ - "init", - "local", - "--project-name", - "test", - "--repository-auto-provision", - "--repository", - "https://notgithub.com", - ] + args = ["init", "local", "--project-name", "test", "--repository", url] with tempfile.TemporaryDirectory() as tmp: tmp_file = Path(tmp).resolve() / "nebari-config.yaml" diff --git a/tests/tests_unit/test_cli_validate.py 
b/tests/tests_unit/test_cli_validate.py index faf2efa8a1..07a931acd4 100644 --- a/tests/tests_unit/test_cli_validate.py +++ b/tests/tests_unit/test_cli_validate.py @@ -114,20 +114,26 @@ def test_cli_validate_from_env(): ["validate", "--config", tmp_file.resolve()], env={"NEBARI_SECRET__amazon_web_services__kubernetes_version": "1.20"}, ) - - assert 0 == valid_result.exit_code - assert not valid_result.exception - assert "Successfully validated configuration" in valid_result.stdout + try: + assert 0 == valid_result.exit_code + assert not valid_result.exception + assert "Successfully validated configuration" in valid_result.stdout + except AssertionError: + print(valid_result.stdout) + raise invalid_result = runner.invoke( app, ["validate", "--config", tmp_file.resolve()], env={"NEBARI_SECRET__amazon_web_services__kubernetes_version": "1.0"}, ) - - assert 1 == invalid_result.exit_code - assert invalid_result.exception - assert "Invalid `kubernetes-version`" in invalid_result.stdout + try: + assert 1 == invalid_result.exit_code + assert invalid_result.exception + assert "Invalid `kubernetes-version`" in invalid_result.stdout + except AssertionError: + print(invalid_result.stdout) + raise @pytest.mark.parametrize( diff --git a/tests/tests_unit/test_init.py b/tests/tests_unit/test_init.py index 8d880162d3..7f81927275 100644 --- a/tests/tests_unit/test_init.py +++ b/tests/tests_unit/test_init.py @@ -15,7 +15,7 @@ ], ) def test_render_config(mock_all_cloud_methods, k8s_version, cloud_provider, expected): - if type(expected) == type and issubclass(expected, Exception): + if type(expected) is type and issubclass(expected, Exception): with pytest.raises(expected): config = render_config( project_name="test", diff --git a/tests/tests_unit/test_provider.py b/tests/tests_unit/test_provider.py deleted file mode 100644 index 3c4f35a1d0..0000000000 --- a/tests/tests_unit/test_provider.py +++ /dev/null @@ -1,54 +0,0 @@ -from contextlib import nullcontext - -import pytest - -from 
_nebari.provider.cloud.google_cloud import check_missing_service - - -@pytest.mark.parametrize( - "activated_services, exception", - [ - ( - { - "Compute Engine API", - "Kubernetes Engine API", - "Cloud Monitoring API", - "Cloud Autoscaling API", - "Identity and Access Management (IAM) API", - "Cloud Resource Manager API", - }, - nullcontext(), - ), - ( - { - "Compute Engine API", - "Kubernetes Engine API", - "Cloud Monitoring API", - "Cloud Autoscaling API", - "Identity and Access Management (IAM) API", - "Cloud Resource Manager API", - "Cloud SQL Admin API", - }, - nullcontext(), - ), - ( - { - "Compute Engine API", - "Kubernetes Engine API", - "Cloud Monitoring API", - "Cloud Autoscaling API", - "Cloud SQL Admin API", - }, - pytest.raises(ValueError, match=r"Missing required services:.*"), - ), - ], -) -def test_gcp_missing_service(monkeypatch, activated_services, exception): - def mock_return(): - return activated_services - - monkeypatch.setattr( - "_nebari.provider.cloud.google_cloud.activated_services", mock_return - ) - with exception: - check_missing_service() diff --git a/tests/tests_unit/test_schema.py b/tests/tests_unit/test_schema.py index 446b6d1085..fa6a0c747c 100644 --- a/tests/tests_unit/test_schema.py +++ b/tests/tests_unit/test_schema.py @@ -139,7 +139,7 @@ def test_multiple_providers(config_schema): config_schema(**config_dict) -def test_aws_premissions_boundary(config_schema): +def test_aws_permissions_boundary(config_schema): permissions_boundary = "arn:aws:iam::123456789012:policy/MyBoundaryPolicy" config_dict = { "project_name": "test", @@ -156,7 +156,7 @@ def test_aws_premissions_boundary(config_schema): @pytest.mark.parametrize("provider", ["local", "existing"]) -def test_setted_provider(config_schema, provider): +def test_set_provider(config_schema, provider): config_dict = { "project_name": "test", "provider": provider, diff --git a/tests/tests_unit/test_stages.py b/tests/tests_unit/test_stages.py new file mode 100644 index 
0000000000..8c0facf8c9 --- /dev/null +++ b/tests/tests_unit/test_stages.py @@ -0,0 +1,93 @@ +import pathlib +from unittest.mock import patch + +import pytest + +from _nebari.stages.terraform_state import TerraformStateStage +from _nebari.utils import yaml +from _nebari.version import __version__ +from nebari import schema +from nebari.plugins import nebari_plugin_manager + +HERE = pathlib.Path(__file__).parent + + +@pytest.fixture +def mock_config(): + with open(HERE / "./cli_validate/local.happy.yaml", "r") as f: + mock_config_file = yaml.load(f) + mock_config_file["nebari_version"] = __version__ + + config = nebari_plugin_manager.config_schema.model_validate(mock_config_file) + return config + + +@pytest.fixture +def terraform_state_stage(mock_config, tmp_path): + return TerraformStateStage(tmp_path, mock_config) + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +def test_check_immutable_fields_no_changes(mock_get_state, terraform_state_stage): + mock_get_state.return_value = terraform_state_stage.config + + # This should not raise an exception + terraform_state_stage.check_immutable_fields() + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +def test_check_immutable_fields_mutable_change( + mock_get_state, terraform_state_stage, mock_config +): + old_config = mock_config.model_copy(deep=True) + old_config.namespace = "old-namespace" + mock_get_state.return_value = old_config + + # This should not raise an exception (namespace is mutable) + terraform_state_stage.check_immutable_fields() + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +@patch.object(schema.Main, "model_fields") +def test_check_immutable_fields_immutable_change( + mock_model_fields, mock_get_state, terraform_state_stage, mock_config +): + old_config = mock_config.model_copy(deep=True) + old_config.provider = schema.ProviderEnum.gcp + mock_get_state.return_value = old_config + + # Mock the provider field to be immutable + 
mock_model_fields.__getitem__.return_value.json_schema_extra = {"immutable": True} + + with pytest.raises(ValueError) as exc_info: + terraform_state_stage.check_immutable_fields() + + assert 'Attempting to change immutable field "provider"' in str(exc_info.value) + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +def test_check_immutable_fields_no_prior_state(mock_get_state, terraform_state_stage): + mock_get_state.return_value = None + + # This should not raise an exception + terraform_state_stage.check_immutable_fields() + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +def test_check_dict_value_change(mock_get_state, terraform_state_stage, mock_config): + old_config = mock_config.model_copy(deep=True) + terraform_state_stage.config.local.node_selectors["worker"].value += "new_value" + mock_get_state.return_value = old_config + + # should not throw an exception + terraform_state_stage.check_immutable_fields() + + +@patch.object(TerraformStateStage, "get_nebari_config_state") +def test_check_list_change(mock_get_state, terraform_state_stage, mock_config): + old_config = mock_config.model_copy(deep=True) + old_config.environments["environment-dask.yaml"].channels.append("defaults") + mock_get_state.return_value = old_config + + # should not throw an exception + terraform_state_stage.check_immutable_fields() diff --git a/tests/tests_unit/test_upgrade.py b/tests/tests_unit/test_upgrade.py index 4871a1fe07..f6e3f80348 100644 --- a/tests/tests_unit/test_upgrade.py +++ b/tests/tests_unit/test_upgrade.py @@ -2,6 +2,7 @@ from pathlib import Path import pytest +from rich.prompt import Prompt from _nebari.upgrade import do_upgrade from _nebari.version import __version__, rounded_ver_parse @@ -48,8 +49,34 @@ def test_upgrade_4_0( qhub_users_import_json, monkeypatch, ): - # Return "y" when asked if you've deleted the Argo CRDs - monkeypatch.setattr("builtins.input", lambda: "y") + + def mock_input(prompt, **kwargs): + # Mock different 
upgrade steps prompt answers + if ( + prompt + == "Have you deleted the Argo Workflows CRDs and service accounts? [y/N] " + ): + return "y" + elif ( + prompt + == "\nDo you want Nebari to update the kube-prometheus-stack CRDs and delete the prometheus-node-exporter for you? If not, you'll have to do it manually." + ): + return "N" + elif ( + prompt + == "Have you backed up your custom dashboards (if necessary), deleted the prometheus-node-exporter daemonset and updated the kube-prometheus-stack CRDs?" + ): + return "y" + elif ( + prompt + == "[bold]Would you like Nebari to assign the corresponding role to all of your current groups automatically?[/bold]" + ): + return "N" + # All other prompts will be answered with "y" + else: + return "y" + + monkeypatch.setattr(Prompt, "ask", mock_input) old_qhub_config_path = Path(__file__).parent / old_qhub_config_path_str diff --git a/tests/tests_unit/test_utils.py b/tests/tests_unit/test_utils.py new file mode 100644 index 0000000000..678cd1f230 --- /dev/null +++ b/tests/tests_unit/test_utils.py @@ -0,0 +1,66 @@ +import pytest + +from _nebari.utils import JsonDiff, JsonDiffEnum, byte_unit_conversion + + +@pytest.mark.parametrize( + "value, from_unit, to_unit, expected", + [ + (1, "", "B", 1), + (1, "B", "B", 1), + (1, "KB", "B", 1000), + (1, "K", "B", 1000), + (1, "k", "b", 1000), + (1, "MB", "B", 1000**2), + (1, "GB", "B", 1000**3), + (1, "TB", "B", 1000**4), + (1, "KiB", "B", 1024), + (1, "MiB", "B", 1024**2), + (1, "GiB", "B", 1024**3), + (1, "TiB", "B", 1024**4), + (1000, "B", "KB", 1), + (1000, "KB", "K", 1000), + (1000, "K", "KB", 1000), + (1000, "MB", "KB", 1000**2), + (1000, "GB", "KB", 1000**3), + (1000, "TB", "KB", 1000**4), + (1000, "KiB", "KB", 1024), + (1000, "Ki", "KB", 1024), + (1000, "Ki", "K", 1024), + (1000, "MiB", "KB", 1024**2), + (1000, "GiB", "KB", 1024**3), + (1000, "TiB", "KB", 1024**4), + (1000**2, "B", "MB", 1), + (1000**2, "KB", "MB", 1000), + (1000**2, "MB", "MB", 1000**2), + (1000**2, "GB", "MB", 
1000**3), + (1000**2, "TB", "MB", 1000**4), + (1000**2, "MiB", "MB", 1024**2), + (1000**3, "B", "GB", 1), + (1000**3, "KB", "GB", 1000), + ], +) +def test_byte_unit_conversion(value, from_unit, to_unit, expected): + assert byte_unit_conversion(f"{value} {from_unit}", to_unit) == expected + + +def test_JsonDiff_diff(): + obj1 = {"a": 1, "b": {"c": 2, "d": 3}} + obj2 = {"a": 1, "b": {"c": 3, "e": 4}, "f": 5} + diff = JsonDiff(obj1, obj2) + assert diff.diff == { + "b": { + "e": {JsonDiffEnum.ADDED: 4}, + "c": {JsonDiffEnum.MODIFIED: (2, 3)}, + "d": {JsonDiffEnum.REMOVED: 3}, + }, + "f": {JsonDiffEnum.ADDED: 5}, + } + + +def test_JsonDiff_modified(): + obj1 = {"a": 1, "b": {"!": 2, "-": 3}, "+": 4} + obj2 = {"a": 1, "b": {"!": 3, "+": 4}, "+": 5} + diff = JsonDiff(obj1, obj2) + modifieds = diff.modified() + assert sorted(modifieds) == sorted([(["b", "!"], 2, 3), (["+"], 4, 5)])