12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129 |
- #
- # Copyright 2018-2022 Elyra Authors
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- # http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- #
- """Tests for elyra-metadata application"""
- import json
- import os
- import shutil
- from tempfile import mkdtemp
- from tempfile import TemporaryDirectory
- from typing import List
- from typing import Optional
- import pytest
- from elyra.metadata.manager import MetadataManager
- from elyra.metadata.metadata import Metadata
- from elyra.metadata.schema import METADATA_TEST_SCHEMASPACE
- from elyra.metadata.schema import METADATA_TEST_SCHEMASPACE_ID
- from elyra.tests.metadata.test_utils import all_of_json
- from elyra.tests.metadata.test_utils import another_metadata_json
- from elyra.tests.metadata.test_utils import create_json_file
- from elyra.tests.metadata.test_utils import invalid_metadata_json
- from elyra.tests.metadata.test_utils import invalid_no_display_name_json
- from elyra.tests.metadata.test_utils import invalid_schema_name_json
- from elyra.tests.metadata.test_utils import one_of_json
- from elyra.tests.metadata.test_utils import PropertyTester
- from elyra.tests.metadata.test_utils import valid_display_name_json
- from elyra.tests.metadata.test_utils import valid_metadata2_json
- from elyra.tests.metadata.test_utils import valid_metadata_json
# Must be set before elyra's schemaspaces are loaded so the "metadata-tests"
# schemaspace exercised by these tests is registered.
os.environ["METADATA_TESTING"] = "1"  # Enable metadata-tests schemaspace
@pytest.fixture()
def mock_data_dir():
    """Redirect JUPYTER_DATA_DIR to a throwaway directory for one test.

    Yields the temporary directory path; on teardown removes it and restores
    the previous JUPYTER_DATA_DIR (or unsets it if it wasn't set).
    """
    runtime_dir = mkdtemp(prefix="runtime_")
    orig_data_dir = os.environ.get("JUPYTER_DATA_DIR")
    os.environ["JUPYTER_DATA_DIR"] = runtime_dir
    try:
        yield runtime_dir  # provide the fixture value
    finally:
        # Guarantee the environment is restored even if removal of the
        # temporary directory fails; otherwise the overridden
        # JUPYTER_DATA_DIR would leak into subsequent tests.
        shutil.rmtree(runtime_dir, ignore_errors=True)
        if orig_data_dir:
            os.environ["JUPYTER_DATA_DIR"] = orig_data_dir
        else:
            os.environ.pop("JUPYTER_DATA_DIR")
def test_no_opts(script_runner):
    """Invoking the CLI without a subcommand fails with a usage message."""
    result = script_runner.run("elyra-metadata")
    assert result.success is False
    expected = (
        "No subcommand specified. One of: "
        "['list', 'create', 'update', 'install', 'remove', 'migrate', 'export', 'import'] "
        "must be specified."
    )
    assert expected in result.stdout
def test_bad_subcommand(script_runner):
    """An unrecognized subcommand fails and lists the valid subcommands."""
    result = script_runner.run("elyra-metadata", "bogus-subcommand")
    assert result.success is False
    expected = (
        "Subcommand 'bogus-subcommand' is invalid. One of: "
        "['list', 'create', 'update', 'install', 'remove', 'migrate', 'export', 'import'] "
        "must be specified."
    )
    assert expected in result.stdout
def test_install_bad_argument(script_runner):
    """An unknown option after 'install' fails and prints the install description."""
    result = script_runner.run("elyra-metadata", "install", "--bogus-argument")
    assert result.success is False
    assert "Subcommand '--bogus-argument' is invalid." in result.stdout
    assert f"Install a metadata instance into schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_install_bad_schemaspace(script_runner):
    """An unknown schemaspace after 'install' fails and prints the install description."""
    result = script_runner.run("elyra-metadata", "install", "bogus-schemaspace")
    assert result.success is False
    assert "Subcommand 'bogus-schemaspace' is invalid." in result.stdout
    assert f"Install a metadata instance into schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_install_help(script_runner):
    """'install --help' exits non-zero but shows the install description."""
    result = script_runner.run("elyra-metadata", "install", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"Install a metadata instance into schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_install_no_schema_single(script_runner, mock_data_dir):
    """A single-schema schemaspace skips --schema_name but still requires --display_name."""
    # Use the runtime-images schemaspace since that is most likely to always be a single-schema schemaspace.
    # Note: this test will break if it ever supports multiple.
    result = script_runner.run("elyra-metadata", "install", "runtime-images")
    assert result.success is False
    assert "ERROR: '--display_name' is a required parameter." in result.stdout
def test_install_no_schema_multiple(script_runner, mock_data_dir):
    """A multi-schema schemaspace requires an explicit --schema_name."""
    result = script_runner.run("elyra-metadata", "install", METADATA_TEST_SCHEMASPACE)
    assert result.success is False
    # Since order in dictionaries, where the one-of list is derived, can be random, just check up to the
    # first known difference in the schema names.
    expected = (
        "ERROR: '--schema_name' is a required parameter and must be one of the "
        "following values: ['metadata-test"
    )
    assert expected in result.stdout
def test_install_bad_schema_multiple(script_runner, mock_data_dir):
    """An unknown --schema_name fails and lists the valid schema names."""
    result = script_runner.run("elyra-metadata", "install", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-foo")
    assert result.success is False
    # Only check up to the first schema name since dictionary ordering can vary.
    assert "ERROR: Parameter '--schema_name' requires one of the following values: ['metadata-test" in result.stdout
def test_install_no_name(script_runner, mock_data_dir):
    """--display_name is required even when a valid --schema_name is supplied."""
    result = script_runner.run("elyra-metadata", "install", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-test")
    assert result.success is False
    assert "ERROR: '--display_name' is a required parameter." in result.stdout
def test_install_complex_usage(script_runner, mock_data_dir):
    """Usage output warns about schema properties that use unsupported JSON keywords."""
    result = script_runner.run("elyra-metadata", "install", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-test")
    assert result.success is False
    assert "Note: The following properties in this schema contain JSON keywords that are not supported" in result.stdout
    for keyword in ("oneOf", "allOf", "$ref"):
        assert f"*** References unsupported keywords: {{'{keyword}'}}" in result.stdout
def test_install_only_display_name(script_runner, mock_data_dir):
    """'install' derives the instance name from --display_name when --name is omitted."""
    display_name = "1 teste 'rápido'"
    derived_name = "a_1_teste_rpido"  # normalized form the CLI derives from the display name
    result = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--display_name={display_name}",
        "--required_test=required_value",
    )
    assert result.success is True
    assert f"Metadata instance '{derived_name}' for schema 'metadata-test' has been written to:" in result.stdout
    # The instance must be retrievable via the derived name...
    manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE_ID)
    instance = manager.get(derived_name)
    assert instance.display_name == display_name
def test_install_invalid_name(script_runner, mock_data_dir):
    """Instance names must be lowercase alphanumeric; uppercase names are rejected."""
    result = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=UPPER_CASE_NOT_ALLOWED",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert result.success is False
    assert "The following exception occurred saving metadata instance for schema 'metadata-test'" in result.stdout
    assert "Name of metadata must be lowercase alphanumeric" in result.stdout
def test_install_simple(script_runner, mock_data_dir):
    """A minimal 'install' persists an instance file with schema defaults applied."""
    instance_name = "test-metadata_42_valid-name"
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{instance_name}.json")
    # Cleanup from any potential previous failures
    if os.path.exists(expected_file):
        os.remove(expected_file)
    result = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={instance_name}",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert result.success
    assert f"Metadata instance '{instance_name}' for schema 'metadata-test' has been written" in result.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fd:
        persisted = json.load(fd)
    assert persisted["schema_name"] == "metadata-test"
    assert persisted["display_name"] == "display_name"
    assert persisted["metadata"]["required_test"] == "required_value"
    assert persisted["metadata"]["number_default_test"] == 42  # defaults will always persist
@pytest.mark.parametrize("option_style", ["equals", "sans-equals", "missing"])
def test_create_from_file(script_runner, mock_data_dir, option_style):
    """'--file=PATH' and '--file PATH' both succeed; a bare '--file' fails."""
    file_path = create_json_file(mock_data_dir, "valid.json", valid_metadata_json)
    argv: List[str] = ["elyra-metadata", "install", METADATA_TEST_SCHEMASPACE]
    if option_style == "equals":
        argv.append(f"--file={file_path}")
    elif option_style == "sans-equals":
        argv.extend(["--file", f"{file_path}"])
    else:  # missing
        argv.append("--file")
    result = script_runner.run(*argv)
    if option_style == "missing":
        assert result.success is False
        expected = (
            "ERROR: Parameter '--file' requires a file with format JSON and no value was provided. "
            "Try again with an appropriate value."
        )
        assert expected in result.stdout
    else:  # success expected
        assert result.success
        assert "Metadata instance 'valid' for schema 'metadata-test' has been written" in result.stdout
@pytest.mark.parametrize("option_style", ["equals", "sans-equals", "missing"])
def test_create_from_json(script_runner, mock_data_dir, option_style):
    """'--json=JSON' and '--json JSON' both succeed; a bare '--json' fails."""
    payload = json.dumps(valid_metadata_json)
    argv: List[str] = ["elyra-metadata", "install", METADATA_TEST_SCHEMASPACE]
    if option_style == "equals":
        argv.append(f"--json={payload}")
    elif option_style == "sans-equals":
        argv.extend(["--json", f"{payload}"])
    else:  # missing
        argv.append("--json")
    result = script_runner.run(*argv)
    if option_style == "missing":
        assert result.success is False
        expected = (
            "ERROR: Parameter '--json' requires a value with format JSON and no value was provided. "
            "Try again with an appropriate value."
        )
        assert expected in result.stdout
    else:  # success expected
        assert result.success
        assert "Metadata instance 'valid_metadata_instance' for schema 'metadata-test' has been written" in result.stdout
def test_install_and_replace(script_runner, mock_data_dir):
    """Exercise the full install/--replace lifecycle for a simple instance.

    Phases: (1) --replace with no existing instance fails; (2) plain install
    succeeds and persists CLI-provided values over schema defaults;
    (3) re-install without --replace fails; (4) --replace without --name
    fails; (5) --replace with --name succeeds and the previously-stored
    non-default value survives.
    """
    expected_file = os.path.join(
        mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, "test-metadata_42_valid-name.json"
    )
    # Cleanup from any potential previous failures
    if os.path.exists(expected_file):
        os.remove(expected_file)
    # Attempt replace before schemaspace exists and ensure appropriate error message
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
        "--replace",
    )
    assert ret.success is False
    assert (
        "No such instance named 'test-metadata_42_valid-name' was found in the metadata-tests schemaspace."
        in ret.stdout
    )
    # Initial install: supply a non-default value so we can later verify it persisted.
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
        "--number_default_test=24",
    )
    assert ret.success
    assert "Metadata instance 'test-metadata_42_valid-name' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fd:
        instance_json = json.load(fd)
        assert instance_json["metadata"]["number_default_test"] == 24  # ensure CLI value is used over default
    # Re-attempt w/o replace flag - failure expected
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert ret.success is False
    assert (
        "An instance named 'test-metadata_42_valid-name' already exists in the metadata-tests "
        "schemaspace" in ret.stderr
    )
    # Re-attempt with replace flag but without --name - failure expected
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--display_name=display_name",
        "--required_test=required_value",
        "--replace",
    )
    assert ret.success is False
    assert "The 'name' parameter requires a value" in ret.stdout
    # Re-attempt with replace flag - success expected
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
        "--replace",
    )
    assert ret.success
    assert "Metadata instance 'test-metadata_42_valid-name' for schema 'metadata-test' has been written" in ret.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fd:
        instance_json = json.load(fd)
        assert instance_json["schema_name"] == "metadata-test"
        assert instance_json["display_name"] == "display_name"
        assert instance_json["metadata"]["required_test"] == "required_value"
        assert instance_json["metadata"]["number_default_test"] == 24  # ensure original value is used over default
@pytest.mark.parametrize("complex_keyword", ["defs", "oneOf", "allOf"])
def test_install_and_replace_complex(script_runner, mock_data_dir, complex_keyword):
    """Install then --replace an instance whose schema uses a complex JSON keyword.

    Each parametrization exercises a different value-delivery mechanism:
    'defs' via --json, 'oneOf' via --file, and 'allOf' via an
    option-value-pair file reference.
    """
    # create and use deep copies of the global one_of_json and all_of_json
    # to avoid side effects
    one_of_json_cp = json.loads(json.dumps(one_of_json))
    all_of_json_cp = json.loads(json.dumps(all_of_json))
    test_file: Optional[str] = None
    name: str = f"test-complex-{complex_keyword}".lower()
    if complex_keyword == "defs":
        option = "--json"
        value = '{ "defs_test": 42 }'
    elif complex_keyword == "oneOf":
        option = "--file"
        # Build the file...
        test_file = os.path.join(mock_data_dir, f"{complex_keyword}.json")
        with open(test_file, mode="w") as one_of_fd:
            json.dump(one_of_json_cp, one_of_fd)
        value = test_file
    else:  # allOf
        option = "--allOf_test"  # Use "ovp-from-file" approach
        # Build the file...
        test_file = os.path.join(mock_data_dir, f"{complex_keyword}.json")
        with open(test_file, mode="w") as all_of_fd:
            json.dump(all_of_json_cp, all_of_fd)
        value = test_file
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{name}.json")
    # Cleanup from any potential previous failures (should be rare)
    if os.path.exists(expected_file):
        os.remove(expected_file)
    # Initial install of the complex instance.
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={name}",
        f"--display_name=Test Complex {complex_keyword}",
        "--required_test=required_value",
        f"{option}={value}",
    )
    assert ret.success
    assert f"Metadata instance '{name}' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.exists(expected_file)
    with open(expected_file) as fd:
        json_results = json.load(fd)
    # Verify common stuff
    assert json_results["display_name"] == f"Test Complex {complex_keyword}"
    assert json_results["metadata"]["required_test"] == "required_value"
    # Verify result and prepare for replace...
    if complex_keyword == "defs":
        assert json_results["metadata"]["defs_test"] == 42
        value = '{ "defs_test": 24 }'
    elif complex_keyword == "oneOf":
        assert json_results["metadata"]["oneOf_test"]["obj_switch"] == "obj2"
        assert json_results["metadata"]["oneOf_test"]["obj2_prop1"] == 42
        one_of_json_cp["metadata"]["oneOf_test"]["obj2_prop1"] = 24
        with open(test_file, mode="w+") as one_of_fd:
            json.dump(one_of_json_cp, one_of_fd)
    elif complex_keyword == "allOf":
        assert len(json_results["metadata"]["allOf_test"]) == 9
        assert json_results["metadata"]["allOf_test"]["obj1_switch"] == "obj1"
        assert json_results["metadata"]["allOf_test"]["obj1_prop1"] == "allOf-test-val1"
        assert json_results["metadata"]["allOf_test"]["obj1_prop2"] == "allOf-test-val2"
        all_of_json_cp["obj1_prop1"] = "allOf-test-val1-replace"
        assert json_results["metadata"]["allOf_test"]["obj2_switch"] == "obj2"
        assert json_results["metadata"]["allOf_test"]["obj2_prop1"] == 42
        assert json_results["metadata"]["allOf_test"]["obj2_prop2"] == 24
        all_of_json_cp["obj2_prop1"] = 24
        assert json_results["metadata"]["allOf_test"]["obj3_switch"] == "obj3"
        assert json_results["metadata"]["allOf_test"]["obj3_prop1"] == 42.7
        assert json_results["metadata"]["allOf_test"]["obj3_prop2"] is True
        all_of_json_cp["obj3_prop1"] = 7.24
        with open(test_file, mode="w+") as all_of_fd:
            json.dump(all_of_json_cp, all_of_fd)
    # Replace the previously-created instance
    ret = script_runner.run(
        "elyra-metadata",
        "install",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={name}",
        f"--display_name=Test Complex {complex_keyword}2",
        "--required_test=required_value",
        f"{option}={value}",
        "--replace",
    )
    assert ret.success
    assert f"Metadata instance '{name}' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.exists(expected_file)
    with open(expected_file) as fd:
        json_results = json.load(fd)
    # Verify common stuff
    assert json_results["display_name"] == f"Test Complex {complex_keyword}2"
    assert json_results["metadata"]["required_test"] == "required_value"
    # Verify result following replace...
    if complex_keyword == "defs":
        assert json_results["metadata"]["defs_test"] == 24
    elif complex_keyword == "oneOf":
        assert json_results["metadata"]["oneOf_test"]["obj_switch"] == "obj2"
        assert json_results["metadata"]["oneOf_test"]["obj2_prop1"] == 24
        assert json_results["metadata"]["oneOf_test"]["obj2_prop2"] == 24
    elif complex_keyword == "allOf":
        assert len(json_results["metadata"]["allOf_test"]) == 9
        assert json_results["metadata"]["allOf_test"]["obj1_prop1"] == "allOf-test-val1-replace"
        assert json_results["metadata"]["allOf_test"]["obj1_prop2"] == "allOf-test-val2"
        assert json_results["metadata"]["allOf_test"]["obj2_prop1"] == 24
        assert json_results["metadata"]["allOf_test"]["obj2_prop2"] == 24
        assert json_results["metadata"]["allOf_test"]["obj3_prop1"] == 7.24
        assert json_results["metadata"]["allOf_test"]["obj3_prop2"] is True
- # ---------- begin of 'create' command tests
def test_create_bad_argument(script_runner):
    """An unknown option after 'create' fails and prints the create description."""
    result = script_runner.run("elyra-metadata", "create", "--bogus-argument")
    assert result.success is False
    assert "Subcommand '--bogus-argument' is invalid." in result.stdout
    assert f"Create a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_create_bad_schemaspace(script_runner):
    """An unknown schemaspace after 'create' fails and prints the create description."""
    result = script_runner.run("elyra-metadata", "create", "bogus-schemaspace")
    assert result.success is False
    assert "Subcommand 'bogus-schemaspace' is invalid." in result.stdout
    assert f"Create a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_create_help(script_runner):
    """'create --help' exits non-zero but shows the create description."""
    result = script_runner.run("elyra-metadata", "create", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"Create a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_create_no_schema_single(script_runner, mock_data_dir):
    """A single-schema schemaspace skips --schema_name but still requires --display_name."""
    # Use the runtime-images schemaspace since that is most likely to always be a single-schema schemaspace.
    # Note: this test will break if it ever supports multiple.
    result = script_runner.run("elyra-metadata", "create", "runtime-images")
    assert result.success is False
    assert "ERROR: '--display_name' is a required parameter." in result.stdout
def test_create_no_schema_multiple(script_runner, mock_data_dir):
    """A multi-schema schemaspace requires an explicit --schema_name."""
    result = script_runner.run("elyra-metadata", "create", METADATA_TEST_SCHEMASPACE)
    assert result.success is False
    # Since order in dictionaries, where the one-of list is derived, can be random, just check up to the
    # first known difference in the schema names.
    expected = (
        "ERROR: '--schema_name' is a required parameter and must be one of the "
        "following values: ['metadata-test"
    )
    assert expected in result.stdout
def test_create_bad_schema_multiple(script_runner, mock_data_dir):
    """An unknown --schema_name fails and lists the valid schema names."""
    result = script_runner.run("elyra-metadata", "create", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-foo")
    assert result.success is False
    # Only check up to the first schema name since dictionary ordering can vary.
    assert "ERROR: Parameter '--schema_name' requires one of the following values: ['metadata-test" in result.stdout
def test_create_no_name(script_runner, mock_data_dir):
    """--display_name is required even when a valid --schema_name is supplied."""
    result = script_runner.run("elyra-metadata", "create", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-test")
    assert result.success is False
    assert "ERROR: '--display_name' is a required parameter." in result.stdout
def test_create_complex_usage(script_runner, mock_data_dir):
    """Usage output warns about schema properties that use unsupported JSON keywords."""
    result = script_runner.run("elyra-metadata", "create", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-test")
    assert result.success is False
    assert "Note: The following properties in this schema contain JSON keywords that are not supported" in result.stdout
    for keyword in ("oneOf", "allOf", "$ref"):
        assert f"*** References unsupported keywords: {{'{keyword}'}}" in result.stdout
def test_create_only_display_name(script_runner, mock_data_dir):
    """'create' derives the instance name from --display_name when --name is omitted."""
    display_name = "1 teste 'rápido'"
    derived_name = "a_1_teste_rpido"  # normalized form the CLI derives from the display name
    result = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--display_name={display_name}",
        "--required_test=required_value",
    )
    assert result.success is True
    assert f"Metadata instance '{derived_name}' for schema 'metadata-test' has been written to:" in result.stdout
    # The instance must be retrievable via the derived name...
    manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE_ID)
    instance = manager.get(derived_name)
    assert instance.display_name == display_name
def test_create_invalid_name(script_runner, mock_data_dir):
    """Instance names must be lowercase alphanumeric; uppercase names are rejected."""
    result = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=UPPER_CASE_NOT_ALLOWED",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert result.success is False
    assert "The following exception occurred saving metadata instance for schema 'metadata-test'" in result.stdout
    assert "Name of metadata must be lowercase alphanumeric" in result.stdout
def test_create_simple(script_runner, mock_data_dir):
    """A minimal 'create' persists an instance file with schema defaults applied."""
    instance_name = "test-metadata_42_valid-name"
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{instance_name}.json")
    # Cleanup from any potential previous failures
    if os.path.exists(expected_file):
        os.remove(expected_file)
    result = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={instance_name}",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert result.success
    assert f"Metadata instance '{instance_name}' for schema 'metadata-test' has been written" in result.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fd:
        persisted = json.load(fd)
    assert persisted["schema_name"] == "metadata-test"
    assert persisted["display_name"] == "display_name"
    assert persisted["metadata"]["required_test"] == "required_value"
    assert persisted["metadata"]["number_default_test"] == 42  # defaults will always persist
def test_create_existing(script_runner, mock_data_dir):
    """'create' refuses to overwrite an instance that already exists."""
    instance_name = "test-metadata_42_valid-name"
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{instance_name}.json")
    # Cleanup from any potential previous failures
    if os.path.exists(expected_file):
        os.remove(expected_file)
    base_argv = [
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={instance_name}",
        "--display_name=display_name",
        "--required_test=required_value",
    ]
    # Initial create supplies a non-default value so we can confirm persistence.
    result = script_runner.run(*base_argv, "--number_default_test=24")
    assert result.success
    assert f"Metadata instance '{instance_name}' for schema 'metadata-test' has been written" in result.stdout
    assert expected_file in result.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)
    with open(expected_file, "r") as fd:
        persisted = json.load(fd)
    assert persisted["metadata"]["number_default_test"] == 24  # ensure CLI value is used over default
    # Re-attempt create - failure expected
    result = script_runner.run(*base_argv)
    assert result.success is False
    assert f"An instance named '{instance_name}' already exists in the metadata-tests schemaspace" in result.stderr
def test_create_complex(script_runner, mock_data_dir):
    """Create an instance whose value is a raw JSON payload passed via --json (no '=' form)."""
    complex_keyword = "defs"
    name: str = f"test-complex-{complex_keyword}".lower()
    option = "--json"
    value = '{ "defs_test": 42 }'
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{name}.json")
    # Cleanup from any potential previous failures (should be rare)
    if os.path.exists(expected_file):
        os.remove(expected_file)

    ret = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={name}",
        f"--display_name=Test Complex {complex_keyword}",
        "--required_test=required_value",
        option,  # don't use '=' between option and value
        value,
    )
    assert ret.success
    assert f"Metadata instance '{name}' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.exists(expected_file)

    with open(expected_file) as fd:
        results = json.load(fd)

    # Verify common stuff
    assert results["display_name"] == f"Test Complex {complex_keyword}"
    assert results["metadata"]["required_test"] == "required_value"
    # ...and the JSON payload made it into the instance
    assert results["metadata"]["defs_test"] == 42
- # ---------- end of 'create' command tests
- #
- # ---------- begin of 'update' command tests
def test_update_bad_argument(script_runner, *_unused):
    """An unrecognized argument to 'update' must fail and print usage help."""
    result = script_runner.run("elyra-metadata", "update", "--bogus-argument")
    assert result.success is False
    output = result.stdout
    assert "Subcommand '--bogus-argument' is invalid." in output
    assert f"Update a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in output
def test_update_bad_schemaspace(script_runner):
    """An unknown schemaspace on 'update' must fail and print usage help."""
    result = script_runner.run("elyra-metadata", "update", "bogus-schemaspace")
    assert result.success is False
    output = result.stdout
    assert "Subcommand 'bogus-schemaspace' is invalid." in output
    assert f"Update a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in output
def test_update_help(script_runner):
    """'update --help' exits unsuccessfully but prints the command description."""
    result = script_runner.run("elyra-metadata", "update", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"Update a metadata instance in schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_update_no_schema_single(script_runner, mock_data_dir):
    """Omitting --schema_name in a single-schema schemaspace fails on the missing name."""
    # Use the runtime-images schemaspace since that is most likely to always be a single-schema schemaspace.
    # Note: this test will break if it ever supports multiple.
    result = script_runner.run("elyra-metadata", "update", "runtime-images")
    assert not result.success
    expected = (
        "The following exception occurred saving metadata instance for schema 'runtime-image': "
        "The 'name' parameter requires a value."
    )
    assert expected in result.stdout
def test_update_no_schema_multiple(script_runner, mock_data_dir):
    """Omitting --schema_name in a multi-schema schemaspace must list the valid choices."""
    result = script_runner.run("elyra-metadata", "update", METADATA_TEST_SCHEMASPACE)
    assert result.success is False
    # Since order in dictionaries, where the one-of list is derived, can be random, just check up to the
    # first known difference in the schema names.
    expected_prefix = (
        "ERROR: '--schema_name' is a required parameter and must be one of the following values: ['metadata-test"
    )
    assert expected_prefix in result.stdout
def test_update_bad_schema_multiple(script_runner, mock_data_dir):
    """A bogus --schema_name must be rejected with the list of valid schema names."""
    result = script_runner.run("elyra-metadata", "update", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-foo")
    assert result.success is False
    assert "ERROR: Parameter '--schema_name' requires one of the following values: ['metadata-test" in result.stdout
def test_update_no_name(script_runner, mock_data_dir):
    """'update' without --name must fail with the name-required message."""
    result = script_runner.run("elyra-metadata", "update", METADATA_TEST_SCHEMASPACE, "--schema_name=metadata-test")
    assert not result.success
    expected = (
        "The following exception occurred saving metadata instance for schema 'metadata-test': "
        "The 'name' parameter requires a value."
    )
    assert expected in result.stdout
def test_update_no_instance(script_runner, mock_data_dir):
    """Attempt replace before instance exists"""
    update_args = [
        "elyra-metadata",
        "update",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
    ]
    result = script_runner.run(*update_args)
    assert not result.success
    assert (
        "No such instance named 'test-metadata_42_valid-name' was found in the metadata-tests schemaspace."
        in result.stdout
    )
def test_update_simple(script_runner, mock_data_dir):
    """Create an instance via the CLI, update one of its values, and verify the persisted result."""
    expected_file = os.path.join(
        mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, "test-metadata_42_valid-name.json"
    )
    # Cleanup from any potential previous failures
    if os.path.exists(expected_file):
        os.remove(expected_file)

    # create an instance
    ret = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=required_value",
    )
    assert ret.success
    assert "Metadata instance 'test-metadata_42_valid-name' for schema 'metadata-test' has been written" in ret.stdout

    # update instance - only 'required_test' changes value
    ret = script_runner.run(
        "elyra-metadata",
        "update",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        "--name=test-metadata_42_valid-name",
        "--display_name=display_name",
        "--required_test=updated_required_value",
    )
    assert ret.success
    assert "Metadata instance 'test-metadata_42_valid-name' for schema 'metadata-test' has been written" in ret.stdout
    assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
    assert os.path.isfile(expected_file)

    # Persisted instance should carry the updated value; untouched fields are unchanged.
    with open(expected_file, "r") as fd:
        instance_json = json.load(fd)
    assert instance_json["schema_name"] == "metadata-test"
    assert instance_json["display_name"] == "display_name"
    assert instance_json["metadata"]["required_test"] == "updated_required_value"
    assert instance_json["metadata"]["number_default_test"] == 42  # defaults will always persist
@pytest.mark.parametrize("complex_keyword", ["defs", "oneOf", "allOf"])
def test_update_complex(script_runner, mock_data_dir, complex_keyword):
    """Create then update instances whose schemas use complex JSON-schema constructs.

    Each parametrization exercises a different way of supplying a complex value:
    'defs' via an inline --json payload, 'oneOf' via --file, and 'allOf' via an
    option-value-pair read from a file.
    """
    # create and use deep copies of the global one_of_json and all_of_json
    # to avoid side effects
    one_of_json_cp = json.loads(json.dumps(one_of_json))
    all_of_json_cp = json.loads(json.dumps(all_of_json))
    test_file: Optional[str] = None
    name: str = f"test-complex-{complex_keyword}".lower()
    if complex_keyword == "defs":
        option = "--json"
        value = '{ "defs_test": 42 }'
    elif complex_keyword == "oneOf":
        option = "--file"
        # Build the file...
        test_file = os.path.join(mock_data_dir, f"{complex_keyword}.json")
        with open(test_file, mode="w") as one_of_fd:
            json.dump(one_of_json_cp, one_of_fd)
        value = test_file
    else:  # allOf
        option = "--allOf_test"  # Use "ovp-from-file" approach
        # Build the file...
        test_file = os.path.join(mock_data_dir, f"{complex_keyword}.json")
        with open(test_file, mode="w") as all_of_fd:
            json.dump(all_of_json_cp, all_of_fd)
        value = test_file
    expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, f"{name}.json")
    # Cleanup from any potential previous failures (should be rare)
    if os.path.exists(expected_file):
        os.remove(expected_file)

    # create instance
    ret = script_runner.run(
        "elyra-metadata",
        "create",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={name}",
        f"--display_name=Test Complex {complex_keyword}",
        "--required_test=required_value",
        f"{option}={value}",
    )
    assert ret.success
    assert f"Metadata instance '{name}' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.exists(expected_file)
    with open(expected_file) as fd:
        json_results = json.load(fd)

    # Verify common stuff
    assert json_results["display_name"] == f"Test Complex {complex_keyword}"
    assert json_results["metadata"]["required_test"] == "required_value"

    # Verify result and prepare for replace...
    # (for file-based cases, rewrite the source file with modified values)
    if complex_keyword == "defs":
        assert json_results["metadata"]["defs_test"] == 42
        value = '{ "defs_test": 24 }'
    elif complex_keyword == "oneOf":
        assert json_results["metadata"]["oneOf_test"]["obj_switch"] == "obj2"
        assert json_results["metadata"]["oneOf_test"]["obj2_prop1"] == 42, f"--> {json_results}"
        one_of_json_cp["metadata"]["oneOf_test"]["obj2_prop1"] = 24
        with open(test_file, mode="w+") as one_of_fd:
            json.dump(one_of_json_cp, one_of_fd)
    elif complex_keyword == "allOf":
        assert len(json_results["metadata"]["allOf_test"]) == 9
        assert json_results["metadata"]["allOf_test"]["obj1_switch"] == "obj1"
        assert json_results["metadata"]["allOf_test"]["obj1_prop1"] == "allOf-test-val1"
        assert json_results["metadata"]["allOf_test"]["obj1_prop2"] == "allOf-test-val2"
        all_of_json_cp["obj1_prop1"] = "allOf-test-val1-replace"
        assert json_results["metadata"]["allOf_test"]["obj2_switch"] == "obj2"
        assert json_results["metadata"]["allOf_test"]["obj2_prop1"] == 42
        assert json_results["metadata"]["allOf_test"]["obj2_prop2"] == 24
        all_of_json_cp["obj2_prop1"] = 24
        assert json_results["metadata"]["allOf_test"]["obj3_switch"] == "obj3"
        assert json_results["metadata"]["allOf_test"]["obj3_prop1"] == 42.7
        assert json_results["metadata"]["allOf_test"]["obj3_prop2"] is True
        all_of_json_cp["obj3_prop1"] = 7.24
        with open(test_file, mode="w+") as all_of_fd:
            json.dump(all_of_json_cp, all_of_fd)

    # Replace the previously created instance
    ret = script_runner.run(
        "elyra-metadata",
        "update",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--name={name}",
        f"--display_name=Test Complex {complex_keyword}2",
        "--required_test=required_value",
        f"{option}={value}",
    )
    assert ret.success
    assert f"Metadata instance '{name}' for schema 'metadata-test' has been written" in ret.stdout
    assert expected_file in ret.stdout
    assert os.path.exists(expected_file)
    with open(expected_file) as fd:
        json_results = json.load(fd)

    # Verify common stuff
    assert json_results["display_name"] == f"Test Complex {complex_keyword}2"
    assert json_results["metadata"]["required_test"] == "required_value"

    # Verify result following replace...
    if complex_keyword == "defs":
        assert json_results["metadata"]["defs_test"] == 24
    elif complex_keyword == "oneOf":
        assert json_results["metadata"]["oneOf_test"]["obj_switch"] == "obj2"
        assert json_results["metadata"]["oneOf_test"]["obj2_prop1"] == 24
        assert json_results["metadata"]["oneOf_test"]["obj2_prop2"] == 24
    elif complex_keyword == "allOf":
        assert len(json_results["metadata"]["allOf_test"]) == 9
        assert json_results["metadata"]["allOf_test"]["obj1_prop1"] == "allOf-test-val1-replace"
        assert json_results["metadata"]["allOf_test"]["obj1_prop2"] == "allOf-test-val2"
        assert json_results["metadata"]["allOf_test"]["obj2_prop1"] == 24
        assert json_results["metadata"]["allOf_test"]["obj2_prop2"] == 24
        assert json_results["metadata"]["allOf_test"]["obj3_prop1"] == 7.24
        assert json_results["metadata"]["allOf_test"]["obj3_prop2"] is True
- # ---------- end of 'update' command tests
def test_list_help(script_runner):
    """'list --help' exits unsuccessfully but prints the command description."""
    result = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"List installed metadata for {METADATA_TEST_SCHEMASPACE}." in result.stdout
def test_list_bad_argument(script_runner):
    """An unexpected argument to 'list' must be reported as an error."""
    result = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--bogus-argument")
    assert result.success is False
    assert "ERROR: The following arguments were unexpected: ['--bogus-argument']" in result.stdout
def test_list_instances(script_runner, mock_data_dir):
    """Exercise 'list' output: empty schemaspace, populated, with invalid instances, and --valid-only."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)

    # Empty schemaspace...
    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE)
    assert ret.success
    lines = ret.stdout.split("\n")
    assert len(lines) == 2  # always 2 more than the actual runtime count
    assert f"No metadata instances found for {METADATA_TEST_SCHEMASPACE}" in lines[0]

    # Populate four instances via the manager (bypassing the CLI).
    valid = Metadata(**valid_metadata_json)
    resource = metadata_manager.create("valid", valid)
    assert resource is not None
    resource = metadata_manager.create("valid2", valid)
    assert resource is not None
    another = Metadata(**another_metadata_json)
    resource = metadata_manager.create("another", another)
    assert resource is not None
    resource = metadata_manager.create("another2", another)
    assert resource is not None

    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE)
    assert ret.success
    lines = ret.stdout.split("\n")
    assert len(lines) == 9  # always 5 more than the actual runtime count
    assert lines[0] == f"Available metadata instances for {METADATA_TEST_SCHEMASPACE} (includes invalid):"
    # Instance rows start at index 4 (after banner/header lines); observed order is sorted by name.
    line_elements = [line.split() for line in lines[4:8]]
    assert line_elements[0][0] == "metadata-test"
    assert line_elements[0][1] == "another"
    assert line_elements[1][0] == "metadata-test"
    assert line_elements[1][1] == "another2"
    assert line_elements[2][0] == "metadata-test"
    assert line_elements[2][1] == "valid"
    assert line_elements[3][0] == "metadata-test"
    assert line_elements[3][1] == "valid2"

    # Remove the '2' runtimes and reconfirm smaller set
    metadata_manager.remove("valid2")
    metadata_manager.remove("another2")
    # Include two additional invalid files as well - one for uri failure, another missing display_name
    metadata_dir = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE)
    create_json_file(metadata_dir, "invalid.json", invalid_metadata_json)
    create_json_file(metadata_dir, "no_display_name.json", invalid_no_display_name_json)
    create_json_file(metadata_dir, "invalid_schema_name.json", invalid_schema_name_json)

    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE)
    assert ret.success
    lines = ret.stdout.split("\n")
    assert len(lines) == 10  # always 5 more than the actual runtime count
    assert lines[0] == f"Available metadata instances for {METADATA_TEST_SCHEMASPACE} (includes invalid):"
    # Invalid instances are flagged with **INVALID** and the error class name.
    line_elements = [line.split() for line in lines[4:9]]
    assert line_elements[0][1] == "another"
    assert line_elements[1][1] == "invalid"
    assert line_elements[1][3] == "**INVALID**"
    assert line_elements[1][4] == "(ValidationError)"
    assert line_elements[2][3] == "**INVALID**"
    assert line_elements[2][4] == "(ValidationError)"
    assert line_elements[3][1] == "valid"
    assert line_elements[4][3] == "**INVALID**"
    assert line_elements[4][4] == "(SchemaNotFoundError)"

    # --valid-only should exclude the three invalid instances.
    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--valid-only")
    assert ret.success
    lines = ret.stdout.split("\n")
    assert len(lines) == 7  # always 5 more than the actual runtime count
    assert lines[0] == f"Available metadata instances for {METADATA_TEST_SCHEMASPACE} (valid only):"
    line_elements = [line.split() for line in lines[4:6]]
    assert line_elements[0][1] == "another"
    assert line_elements[1][1] == "valid"
def test_list_json_instances(script_runner, mock_data_dir):
    """Exercise 'list --json' output: empty, populated, with invalid instances, and --valid-only."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)

    # Empty schemaspace yields an empty JSON array.
    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--json")
    assert ret.success
    lines = ret.stdout.split("\n")
    assert len(lines) == 2
    assert lines[0] == "[]"

    # Populate four instances via the manager (bypassing the CLI).
    valid = Metadata(**valid_metadata_json)
    resource = metadata_manager.create("valid", valid)
    assert resource is not None
    resource = metadata_manager.create("valid2", valid)
    assert resource is not None
    another = Metadata(**another_metadata_json)
    resource = metadata_manager.create("another", another)
    assert resource is not None
    resource = metadata_manager.create("another2", another)
    assert resource is not None

    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--json")
    assert ret.success
    # Consume results
    results = json.loads(ret.stdout)
    assert len(results) == 4

    # Remove the '2' runtimes and reconfirm smaller set
    metadata_manager.remove("valid2")
    metadata_manager.remove("another2")
    # Include two additional invalid files as well - one for uri failure, another missing display_name
    metadata_dir = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE)
    create_json_file(metadata_dir, "invalid.json", invalid_metadata_json)
    create_json_file(metadata_dir, "no_display_name.json", invalid_no_display_name_json)

    # Two valid + two invalid instances are all included by default...
    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--json")
    assert ret.success
    results = json.loads(ret.stdout)
    assert len(results) == 4

    # ...while --valid-only filters the invalid pair out.
    ret = script_runner.run("elyra-metadata", "list", METADATA_TEST_SCHEMASPACE, "--json", "--valid-only")
    assert ret.success
    results = json.loads(ret.stdout)
    assert len(results) == 2
def test_remove_help(script_runner):
    """'remove --help' exits unsuccessfully but prints the command description."""
    result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"Remove a metadata instance from schemaspace '{METADATA_TEST_SCHEMASPACE}'." in result.stdout
def test_remove_no_name(script_runner):
    """'remove' without --name must fail with the required-parameter message."""
    result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE)
    assert result.success is False
    assert "ERROR: '--name' is a required parameter." in result.stdout
def test_remove_with_no_equals(script_runner):
    """Removal should also work when the value follows '--name' as a separate token."""
    # Attempt removal w/o the '=' between parameter and value
    manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    manager.create("valid", Metadata(**valid_metadata_json))
    result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE, "--name", "valid")
    assert result.success is True
def test_remove_missing(script_runner, mock_data_dir):
    """Removing a non-existent instance must fail with a not-found message."""
    # Create an instance so that the schemaspace exists.
    manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    manager.create("valid", Metadata(**valid_metadata_json))

    result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE, "--name=missing")
    assert not result.success
    assert "No such instance named 'missing' was found in the metadata-tests schemaspace." in result.stdout

    # Now cleanup original instance.
    result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE, "--name=valid")
    assert result.success
def test_remove_instance(script_runner, mock_data_dir):
    """Remove two of four instances and confirm only the '2'-suffixed ones remain."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    valid = Metadata(**valid_metadata_json)
    another = Metadata(**another_metadata_json)
    for instance_name, instance in (
        ("valid", valid),
        ("valid2", valid),
        ("another", another),
        ("another2", another),
    ):
        assert metadata_manager.create(instance_name, instance) is not None

    for doomed in ("valid", "another"):
        result = script_runner.run("elyra-metadata", "remove", METADATA_TEST_SCHEMASPACE, f"--name={doomed}")
        assert result.success

    instances = metadata_manager.get_all()
    assert len(instances) == 2
    assert instances[0].name.endswith("2")
    assert instances[1].name.endswith("2")
def test_export_help(script_runner):
    """'export --help' exits unsuccessfully but prints the command description."""
    result = script_runner.run("elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, "--help")
    assert result.success is False
    assert f"Export installed metadata in schemaspace '{METADATA_TEST_SCHEMASPACE}'" in result.stdout
def test_export_no_directory(script_runner):
    """'export' without --directory must fail with the required-parameter message."""
    result = script_runner.run("elyra-metadata", "export", METADATA_TEST_SCHEMASPACE)
    assert result.success is False
    assert "'--directory' is a required parameter." in result.stdout
def test_export_bad_argument(script_runner):
    """An unexpected argument to 'export' must be reported as an error."""
    result = script_runner.run(
        "elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, "--directory=dummy-directory", "--bogus-argument"
    )
    assert not result.success
    assert "The following arguments were unexpected: ['--bogus-argument']" in result.stdout
def test_export_bad_schemaspace(script_runner):
    """An unknown schemaspace on 'export' must fail as an invalid subcommand."""
    result = script_runner.run("elyra-metadata", "export", "bogus-schemaspace")
    assert result.success is False
    assert "Subcommand 'bogus-schemaspace' is invalid." in result.stdout
def test_export_bad_schema(script_runner):
    """Exporting with an unknown schema name must fail and list the valid schemas."""
    export_args = [
        "elyra-metadata",
        "export",
        METADATA_TEST_SCHEMASPACE,
        "--directory=dummy-directory",
        "--schema_name=bogus-schema",
    ]
    result = script_runner.run(*export_args)
    expected = (
        "Schema name 'bogus-schema' is invalid. For the 'metadata-tests' schemaspace, "
        "the schema name must be one of ['metadata-test', 'metadata-test2']"
    )
    assert expected in result.stdout
    assert result.success is False
def test_export_no_schema_no_instances(script_runner, mock_data_dir):
    """Exporting an empty schemaspace succeeds but reports that nothing was exported."""
    result = script_runner.run("elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, "--directory=dummy-directory")
    assert result.success
    assert f"No metadata instances found for schemaspace '{METADATA_TEST_SCHEMASPACE}'" in result.stdout
    assert "Nothing exported to 'dummy-directory'" in result.stdout
def test_export_inaccessible_directory(script_runner, mock_data_dir):
    """Exporting into an unwritable location must fail with a filesystem error message."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    # create metadata
    assert metadata_manager.create("valid", Metadata(**valid_metadata_json)) is not None

    directory_parameter = "/dummy-directory"
    result = script_runner.run(
        "elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}"
    )
    assert not result.success
    assert f"Error creating directory structure for '{directory_parameter}/{METADATA_TEST_SCHEMASPACE}': " in result.stdout
    # The underlying OS error text varies across platforms.
    assert any(ele in result.stdout for ele in ["Read-only file system: ", "Permission denied: ", "Access Denied: "])
    assert f"'{directory_parameter}'" in result.stdout
def test_export_with_schema_no_instances(script_runner, mock_data_dir):
    """Export filtered to a schema with no instances reports that nothing was exported."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    # create metadata in a different schema
    assert metadata_manager.create("valid", Metadata(**valid_metadata_json)) is not None

    export_args = [
        "elyra-metadata",
        "export",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test2",
        "--directory=dummy-directory",
    ]
    result = script_runner.run(*export_args)
    assert result.success
    assert (
        f"No metadata instances found for schemaspace '{METADATA_TEST_SCHEMASPACE}' "
        f"and schema 'metadata-test2'" in result.stdout
    )
    assert "Nothing exported to 'dummy-directory'" in result.stdout
def test_export_no_schema_with_instances(script_runner, mock_data_dir):
    """Export a schemaspace (no schema filter) with both valid and invalid instances,
    checking counts and exported files with and without invalid inclusion."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    # create valid metadata
    valid = Metadata(**valid_metadata_json)
    resource = metadata_manager.create("valid", valid)
    assert resource is not None
    # create invalid metadata
    metadata_dir = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE)
    create_json_file(metadata_dir, "invalid.json", invalid_metadata_json)
    create_json_file(metadata_dir, "invalid2.json", invalid_metadata_json)

    # test for valid and invalid
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    ret = script_runner.run(
        "elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, "--include-invalid", f"--directory={directory_parameter}"
    )
    assert ret.success
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    assert f"Creating directory structure for '{export_directory}'" in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}' "
        f"(includes invalid) to '{export_directory}'" in ret.stdout
    )
    assert "Exported 3 instances (2 of which are invalid)" in ret.stdout
    exported_metadata = sorted(os.listdir(export_directory), key=str.casefold)
    assert len(exported_metadata) == 3
    assert exported_metadata[0] == "invalid.json"
    assert exported_metadata[1] == "invalid2.json"
    assert exported_metadata[2] == "valid.json"
    temp_dir.cleanup()

    # test for valid and invalid using '--include-invalid' option, which
    # prior to version 4.0 is a no-op
    # NOTE(review): this section differs from the previous one only in argument
    # order ('--include-invalid' is passed in both) - confirm whether one of the
    # two runs was meant to omit the flag, otherwise consider consolidating.
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    ret = script_runner.run(
        "elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}", "--include-invalid"
    )
    assert ret.success
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    assert f"Creating directory structure for '{export_directory}'" in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}' "
        f"(includes invalid) to '{export_directory}'" in ret.stdout
    )
    assert "Exported 3 instances (2 of which are invalid)" in ret.stdout
    exported_metadata = sorted(os.listdir(export_directory), key=str.casefold)
    assert len(exported_metadata) == 3
    assert exported_metadata[0] == "invalid.json"
    assert exported_metadata[1] == "invalid2.json"
    assert exported_metadata[2] == "valid.json"
    temp_dir.cleanup()

    # test for valid only
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    ret = script_runner.run("elyra-metadata", "export", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
    assert ret.success
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    assert f"Creating directory structure for '{export_directory}'" in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}' "
        f"(valid only) to '{export_directory}'" in ret.stdout
    )
    assert "Exported 1 instance (0 of which are invalid)" in ret.stdout
    exported_metadata = os.listdir(export_directory)
    assert len(exported_metadata) == 1
    assert exported_metadata[0] == "valid.json"
    temp_dir.cleanup()
def test_export_with_schema_with_instances(script_runner, mock_data_dir):
    """Export filtered by schema name, with and without invalid instances included."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    # create valid metadata - one instance per schema so the filter has something to exclude
    valid = Metadata(**valid_metadata_json)
    resource = metadata_manager.create("valid", valid)
    assert resource is not None
    valid = Metadata(**valid_metadata2_json)
    resource = metadata_manager.create("valid2", valid)
    assert resource is not None
    # create invalid metadata
    metadata_dir = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE)
    create_json_file(metadata_dir, "invalid.json", invalid_metadata_json)

    # create export directory
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    # test for valid and invalid
    ret = script_runner.run(
        "elyra-metadata",
        "export",
        METADATA_TEST_SCHEMASPACE,
        "--include-invalid",
        "--schema_name=metadata-test",
        f"--directory={directory_parameter}",
    )
    assert ret.success
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    assert f"Creating directory structure for '{export_directory}'" in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}'"
        f" and schema 'metadata-test' (includes invalid) to '{export_directory}'" in ret.stdout
    )
    # 'valid2' belongs to schema 'metadata-test2' and should be filtered out.
    assert "Exported 2 instances (1 of which is invalid)" in ret.stdout
    exported_metadata = sorted(os.listdir(export_directory), key=str.casefold)
    assert len(exported_metadata) == 2
    assert exported_metadata[0] == "invalid.json"
    assert exported_metadata[1] == "valid.json"
    temp_dir.cleanup()

    # create export directory
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    # test for valid only
    ret = script_runner.run(
        "elyra-metadata",
        "export",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--directory={directory_parameter}",
    )
    assert ret.success
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    assert f"Creating directory structure for '{export_directory}'" in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}'"
        f" and schema 'metadata-test' (valid only) to '{export_directory}'" in ret.stdout
    )
    assert "Exported 1 instance (0 of which are invalid)" in ret.stdout
    exported_metadata = os.listdir(export_directory)
    assert len(exported_metadata) == 1
    assert exported_metadata[0] == "valid.json"
    temp_dir.cleanup()
def test_export_without_clean(script_runner, mock_data_dir):
    """Export without --clean must overwrite matching files but leave unrelated files intact."""
    metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
    # create valid metadata
    valid = Metadata(**valid_metadata_json)
    resource = metadata_manager.create("valid", valid)
    assert resource is not None

    # create export directory
    temp_dir = TemporaryDirectory()
    directory_parameter = temp_dir.name
    export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
    os.mkdir(export_directory)

    # create metadata file with dummy data and verify its contents
    # (same filename as the instance, so export should overwrite it)
    dummy_json = {"required_test": "required_value"}
    metadata_filename = "valid.json"
    create_json_file(export_directory, metadata_filename, dummy_json)
    metadata_file_path = os.path.join(export_directory, metadata_filename)
    assert os.path.exists(metadata_file_path)
    assert json.loads(open(metadata_file_path).read()) == dummy_json

    # create additional dummy file with a different name and verify its contents
    dummy_filename = "dummy.json"
    create_json_file(export_directory, dummy_filename, dummy_json)
    dummy_file_path = os.path.join(export_directory, dummy_filename)
    assert os.path.exists(dummy_file_path)
    assert json.loads(open(dummy_file_path).read()) == dummy_json

    # create dummy file under different folder (different schema) and verify its contents
    export_directory_other = os.path.join(directory_parameter, "runtimes")
    os.mkdir(export_directory_other)
    dummy_filename_other = "dummy.json"
    create_json_file(export_directory_other, dummy_filename_other, dummy_json)
    dummy_file_path_other = os.path.join(export_directory_other, dummy_filename_other)
    assert os.path.exists(dummy_file_path_other)
    assert json.loads(open(dummy_file_path_other).read()) == dummy_json

    # export metadata without --clean flag
    ret = script_runner.run(
        "elyra-metadata",
        "export",
        METADATA_TEST_SCHEMASPACE,
        "--schema_name=metadata-test",
        f"--directory={directory_parameter}",
    )
    assert ret.success
    # Directory already existed, so no creation message is expected.
    assert f"Creating directory structure for '{export_directory}'" not in ret.stdout
    assert (
        f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}'"
        f" and schema 'metadata-test' (valid only) to '{export_directory}'" in ret.stdout
    )
    assert "Exported 1 instance (0 of which are invalid)" in ret.stdout

    # verify that the metadata file was overwritten while both the dummy files were left as is
    export_directory_files = sorted(os.listdir(export_directory), key=str.casefold)
    assert len(export_directory_files) == 2
    assert export_directory_files[0] == dummy_filename
    assert json.loads(open(dummy_file_path).read()) == dummy_json
    assert export_directory_files[1] == metadata_filename
    exported_metadata = json.loads(open(metadata_file_path).read())
    assert "schema_name" in exported_metadata
    assert exported_metadata.get("schema_name") == valid_metadata_json.get("schema_name")
    export_directory_other_files = sorted(os.listdir(export_directory_other), key=str.casefold)
    assert len(export_directory_other_files) == 1
    assert export_directory_other_files[0] == dummy_filename_other
    assert json.loads(open(dummy_file_path_other).read()) == dummy_json
    temp_dir.cleanup()
- def test_export_clean(script_runner, mock_data_dir):
- metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
- # create valid metadata
- valid = Metadata(**valid_metadata_json)
- resource = metadata_manager.create("valid", valid)
- assert resource is not None
- # create export directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- export_directory = os.path.join(directory_parameter, METADATA_TEST_SCHEMASPACE)
- os.mkdir(export_directory)
- # create metadata file with dummy data and verify its contents
- dummy_json = {"required_test": "required_value"}
- metadata_filename = "valid.json"
- create_json_file(export_directory, metadata_filename, dummy_json)
- metadata_file_path = os.path.join(export_directory, metadata_filename)
- assert os.path.exists(metadata_file_path)
- assert json.loads(open(metadata_file_path).read()) == dummy_json
- # create additional dummy file with a different name and verify its contents
- dummy_filename = "dummy.json"
- create_json_file(export_directory, dummy_filename, dummy_json)
- dummy_file_path = os.path.join(export_directory, dummy_filename)
- assert os.path.exists(dummy_file_path)
- assert json.loads(open(dummy_file_path).read()) == dummy_json
- # create dummy file under different folder (different schema) and verify its contents
- export_directory_other = os.path.join(directory_parameter, "runtimes")
- os.mkdir(export_directory_other)
- dummy_filename_other = "dummy.json"
- create_json_file(export_directory_other, dummy_filename_other, dummy_json)
- dummy_file_path_other = os.path.join(export_directory_other, dummy_filename_other)
- assert os.path.exists(dummy_file_path_other)
- assert json.loads(open(dummy_file_path_other).read()) == dummy_json
- # export metadata with --clean flag
- ret = script_runner.run(
- "elyra-metadata",
- "export",
- METADATA_TEST_SCHEMASPACE,
- "--clean",
- "--schema_name=metadata-test",
- f"--directory={directory_parameter}",
- )
- assert ret.success
- assert f"Creating directory structure for '{export_directory}'" not in ret.stdout
- assert f"Cleaning out all files in '{export_directory}'" in ret.stdout
- assert (
- f"Exporting metadata instances for schemaspace '{METADATA_TEST_SCHEMASPACE}'"
- f" and schema 'metadata-test' (valid only) to '{export_directory}'" in ret.stdout
- )
- assert "Exported 1 instance (0 of which are invalid)" in ret.stdout
- # verify that the metadata file was overwritten and dummy file within the same schema folder was deleted
- # whereas the dummy file within the other schema folder was left as is
- export_directory_files = os.listdir(export_directory)
- assert len(export_directory_files) == 1
- assert export_directory_files[0] == metadata_filename
- exported_metadata = json.loads(open(metadata_file_path).read())
- assert "schema_name" in exported_metadata
- assert exported_metadata.get("schema_name") == valid_metadata_json.get("schema_name")
- export_directory_other_files = sorted(os.listdir(export_directory_other), key=str.casefold)
- assert len(export_directory_other_files) == 1
- assert export_directory_other_files[0] == dummy_filename_other
- assert json.loads(open(dummy_file_path_other).read()) == dummy_json
- temp_dir.cleanup()
- def test_import_help(script_runner):
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, "--help")
- assert ret.success is False
- assert f"\nImport metadata instances into schemaspace '{METADATA_TEST_SCHEMASPACE}'" in ret.stdout
- def test_import_no_directory(script_runner):
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE)
- assert ret.success is False
- assert "ERROR: '--directory' is a required parameter." in ret.stdout
- def test_import_bad_argument(script_runner):
- ret = script_runner.run(
- "elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, "--directory=dummy-directory", "--bogus-argument"
- )
- assert ret.success is False
- assert "ERROR: The following arguments were unexpected: ['--bogus-argument']" in ret.stdout
- def test_import_bad_schemaspace(script_runner):
- ret = script_runner.run("elyra-metadata", "import", "bogus-schemaspace")
- assert ret.success is False
- assert "Subcommand 'bogus-schemaspace' is invalid." in ret.stdout
- def test_import_inaccessible_directory(script_runner):
- directory_parameter = "/dummy-directory"
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is False
- assert (
- f"Unable to reach the '{directory_parameter}'"
- f" directory: No such file or directory: '{directory_parameter}" in ret.stdout
- )
- def test_import_empty_directory(script_runner):
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # import metadata
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- assert f"No instances for import found in the '{directory_parameter}' directory" in ret.stdout
- temp_dir.cleanup()
- def test_import_non_json_file(script_runner):
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # add a dummy file in the directory
- dummy_filename = "dummy.txt"
- dummy_filepath = os.path.join(directory_parameter, dummy_filename)
- dummy_file_content = "This is a dummy txt file."
- with open(dummy_filepath, "w") as f:
- f.write(dummy_file_content)
- assert os.path.exists(dummy_filepath)
- assert open(dummy_filepath).read() == dummy_file_content
- # import metadata
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- assert f"No instances for import found in the '{directory_parameter}' directory" in ret.stdout
- temp_dir.cleanup()
- def test_import_valid_metadata_files(script_runner, mock_data_dir):
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # add valid metadata json files in the directory
- metadata_filename = "valid.json"
- metadata_file_path = os.path.join(directory_parameter, metadata_filename)
- with open(metadata_file_path, "w") as f:
- json.dump(valid_metadata_json, f)
- assert os.path.exists(metadata_file_path)
- assert json.loads(open(metadata_file_path).read()) == valid_metadata_json
- metadata_filename2 = "valid2.json"
- metadata_file_path2 = os.path.join(directory_parameter, metadata_filename2)
- with open(metadata_file_path2, "w") as f:
- json.dump(valid_metadata2_json, f)
- assert os.path.exists(metadata_file_path2)
- assert json.loads(open(metadata_file_path2).read()) == valid_metadata2_json
- # import metadata
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- assert "Imported 2 instances" in ret.stdout
- temp_dir.cleanup()
- # verify contents of imported metadata
- metadata_directory = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE)
- assert os.path.isdir(metadata_directory)
- installed_metadata_file_path = os.path.join(metadata_directory, metadata_filename)
- assert os.path.isfile(installed_metadata_file_path)
- with open(installed_metadata_file_path, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_metadata_json["schema_name"]
- assert instance_json["display_name"] == valid_metadata_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_metadata_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == valid_metadata_json["metadata"]["uri_test"]
- assert instance_json["metadata"]["number_range_test"] == valid_metadata_json["metadata"]["number_range_test"]
- installed_metadata_file_path = os.path.join(metadata_directory, metadata_filename2)
- assert os.path.isfile(installed_metadata_file_path)
- with open(installed_metadata_file_path, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_metadata2_json["schema_name"]
- assert instance_json["display_name"] == valid_metadata2_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_metadata2_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == valid_metadata2_json["metadata"]["uri_test"]
- assert instance_json["metadata"]["number_range_test"] == valid_metadata2_json["metadata"]["number_range_test"]
- def test_import_invalid_metadata_file(script_runner, mock_data_dir):
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # add invalid metadata json file in the directory
- metadata_filename = "invalid.json"
- metadata_file_path = os.path.join(directory_parameter, metadata_filename)
- with open(metadata_file_path, "w") as f:
- json.dump(invalid_metadata_json, f)
- assert os.path.exists(metadata_file_path)
- assert json.loads(open(metadata_file_path).read()) == invalid_metadata_json
- # import metadata
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- lines = ret.stdout.split("\n")
- assert len(lines) == 8
- assert "Imported 0 instances" in lines[0]
- assert "1 instance could not be imported" in lines[1]
- assert "The following files could not be imported:" in lines[3]
- assert lines[6].startswith("invalid.json")
- assert (
- lines[6]
- .strip()
- .endswith(
- "Validation failed for instance 'invalid' using the metadata-test "
- + "schema with error: '//localhost:8081/' is not a 'uri'."
- )
- )
- temp_dir.cleanup()
- def test_import_with_subfolder(script_runner, mock_data_dir):
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # add valid metadata json file in the import directory
- metadata_filename = "valid.json"
- metadata_file_path = os.path.join(directory_parameter, metadata_filename)
- with open(metadata_file_path, "w") as f:
- json.dump(valid_metadata_json, f)
- assert os.path.exists(metadata_file_path)
- assert json.loads(open(metadata_file_path).read()) == valid_metadata_json
- # add invalid metadata json files in the directory
- invalid_metadata_filename = "invalid.json"
- invalid_metadata_file_path = os.path.join(directory_parameter, invalid_metadata_filename)
- with open(invalid_metadata_file_path, "w") as f:
- json.dump(invalid_metadata_json, f)
- assert os.path.exists(invalid_metadata_file_path)
- assert json.loads(open(invalid_metadata_file_path).read()) == invalid_metadata_json
- invalid_metadata_file_path2 = os.path.join(directory_parameter, "invalid2.json")
- shutil.copyfile(invalid_metadata_file_path, invalid_metadata_file_path2)
- assert os.path.exists(invalid_metadata_file_path2)
- assert json.loads(open(invalid_metadata_file_path2).read()) == invalid_metadata_json
- # create a sub-folder within import directory and add a valid metadata file in it
- os.mkdir(os.path.join(directory_parameter, "subfolder"))
- metadata_filename2 = "valid2.json"
- metadata_file_path2 = os.path.join(directory_parameter, "subfolder", metadata_filename2)
- with open(metadata_file_path2, "w") as f:
- json.dump(valid_metadata2_json, f)
- assert os.path.exists(metadata_file_path2)
- assert json.loads(open(metadata_file_path2).read()) == valid_metadata2_json
- # import metadata
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- lines = ret.stdout.split("\n")
- assert len(lines) == 9
- assert "Imported 1 instance" in lines[0]
- assert "2 instances could not be imported" in lines[1]
- assert "The following files could not be imported:" in lines[3]
- assert lines[6].startswith("invalid.json")
- assert (
- lines[6]
- .strip()
- .endswith(
- "Validation failed for instance 'invalid' using the metadata-test "
- + "schema with error: '//localhost:8081/' is not a 'uri'."
- )
- )
- assert lines[7].startswith("invalid2.json")
- assert (
- lines[7]
- .strip()
- .endswith(
- "Validation failed for instance 'invalid2' using the metadata-test "
- + "schema with error: '//localhost:8081/' is not a 'uri'."
- )
- )
- temp_dir.cleanup()
- # verify contents of imported metadata
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- installed_metadata_file_path = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, metadata_filename)
- assert os.path.isfile(installed_metadata_file_path)
- with open(installed_metadata_file_path, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_metadata_json["schema_name"]
- assert instance_json["display_name"] == valid_metadata_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_metadata_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == valid_metadata_json["metadata"]["uri_test"]
- assert instance_json["metadata"]["number_range_test"] == valid_metadata_json["metadata"]["number_range_test"]
- def test_import_overwrite_flag(script_runner, mock_data_dir):
- metadata_manager = MetadataManager(schemaspace=METADATA_TEST_SCHEMASPACE)
- # create valid metadata
- valid = Metadata(**valid_metadata_json)
- resource = metadata_manager.create("valid", valid)
- assert resource is not None
- # create import directory
- temp_dir = TemporaryDirectory()
- directory_parameter = temp_dir.name
- # add valid metadata json file for existing metadata in the import directory
- metadata_filename = "valid.json"
- metadata_file_path = os.path.join(directory_parameter, metadata_filename)
- with open(metadata_file_path, "w") as f:
- json.dump(valid_metadata_json, f)
- assert os.path.exists(metadata_file_path)
- assert json.loads(open(metadata_file_path).read()) == valid_metadata_json
- # add valid metadata json file for new metadata
- metadata_filename2 = "valid2.json"
- metadata_file_path2 = os.path.join(directory_parameter, metadata_filename2)
- with open(metadata_file_path2, "w") as f:
- json.dump(valid_metadata_json, f)
- assert os.path.exists(metadata_file_path2)
- assert json.loads(open(metadata_file_path2).read()) == valid_metadata_json
- # import metadata without overwrite flag
- ret = script_runner.run("elyra-metadata", "import", METADATA_TEST_SCHEMASPACE, f"--directory={directory_parameter}")
- assert ret.success is True
- lines = ret.stdout.split("\n")
- assert len(lines) == 8
- assert "Imported 1 instance" in lines[0]
- assert "1 instance could not be imported" in lines[1]
- assert "The following files could not be imported:" in lines[3]
- assert lines[6].startswith("valid.json")
- assert (
- lines[6]
- .strip()
- .endswith(
- "An instance named 'valid' already exists in the metadata-tests "
- + "schemaspace. Use '--overwrite' to update."
- )
- )
- # verify contents of imported metadata
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- installed_metadata_file_path2 = os.path.join(
- mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, metadata_filename2
- )
- assert os.path.isfile(installed_metadata_file_path2)
- with open(installed_metadata_file_path2, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_metadata_json["schema_name"]
- assert instance_json["display_name"] == valid_metadata_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_metadata_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == valid_metadata_json["metadata"]["uri_test"]
- assert instance_json["metadata"]["number_range_test"] == valid_metadata_json["metadata"]["number_range_test"]
- # replace one of the metadata files with new content
- os.remove(metadata_file_path2)
- assert os.path.exists(metadata_file_path2) is False
- with open(metadata_file_path2, "w") as f:
- json.dump(valid_display_name_json, f)
- assert os.path.exists(metadata_file_path2)
- assert json.loads(open(metadata_file_path2).read()) == valid_display_name_json
- # add another valid metadata json file for new metadata
- metadata_filename3 = "another.json"
- metadata_file_path3 = os.path.join(directory_parameter, metadata_filename3)
- with open(metadata_file_path3, "w") as f:
- json.dump(another_metadata_json, f)
- assert os.path.exists(metadata_file_path3)
- assert json.loads(open(metadata_file_path3).read()) == another_metadata_json
- # re-try import metadata with overwrite flag
- ret = script_runner.run(
- "elyra-metadata",
- "import",
- METADATA_TEST_SCHEMASPACE,
- f"--directory={directory_parameter}",
- "--overwrite",
- )
- assert ret.success is True
- assert "Imported 3 instances" in ret.stdout
- temp_dir.cleanup()
- # verify contents of existing (unchanged) metadata
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- installed_metadata_file_path = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, metadata_filename)
- assert os.path.isfile(installed_metadata_file_path)
- with open(installed_metadata_file_path, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_metadata_json["schema_name"]
- assert instance_json["display_name"] == valid_metadata_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_metadata_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == valid_metadata_json["metadata"]["uri_test"]
- assert instance_json["metadata"]["number_range_test"] == valid_metadata_json["metadata"]["number_range_test"]
- # verify contents of overwritten metadata
- installed_metadata_filepath2 = os.path.join(
- mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, metadata_filename2
- )
- assert os.path.isfile(installed_metadata_filepath2)
- with open(installed_metadata_filepath2, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == valid_display_name_json["schema_name"]
- assert instance_json["display_name"] == valid_display_name_json["display_name"]
- assert instance_json["metadata"]["required_test"] == valid_display_name_json["metadata"]["required_test"]
- # verify contents of new imported metadata
- installed_metadata_filepath3 = os.path.join(
- mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, metadata_filename3
- )
- assert os.path.isfile(installed_metadata_filepath3)
- with open(installed_metadata_filepath3, "r") as metadata_file:
- instance_json = json.load(metadata_file)
- assert instance_json["schema_name"] == another_metadata_json["schema_name"]
- assert instance_json["display_name"] == another_metadata_json["display_name"]
- assert instance_json["metadata"]["required_test"] == another_metadata_json["metadata"]["required_test"]
- assert instance_json["metadata"]["uri_test"] == another_metadata_json["metadata"]["uri_test"]
- # Begin property tests...
- def test_required(script_runner, mock_data_dir):
- # Doesn't use PropertyTester due to its unique test since all other tests require this property
- name = "required"
- expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, name + ".json")
- # Cleanup from any potential previous failures
- if os.path.exists(expected_file):
- os.remove(expected_file)
- ret = script_runner.run(
- "elyra-metadata",
- "install",
- METADATA_TEST_SCHEMASPACE,
- "--schema_name=metadata-test",
- "--name=" + name,
- "--display_name=" + name,
- )
- assert ret.success is False
- assert "'--required_test' is a required parameter" in ret.stdout
- ret = script_runner.run(
- "elyra-metadata",
- "install",
- METADATA_TEST_SCHEMASPACE,
- "--schema_name=metadata-test",
- "--name=" + name,
- "--display_name=" + name,
- "--required_test=required_value",
- )
- assert ret.success
- assert "Metadata instance '" + name + "' for schema 'metadata-test' has been written" in ret.stdout
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- assert os.path.isfile(expected_file)
- with open(expected_file, "r") as fd:
- instance_json = json.load(fd)
- assert instance_json["schema_name"] == "metadata-test"
- assert instance_json["display_name"] == name
- assert instance_json["metadata"]["required_test"] == "required_value"
- def test_number_default(script_runner, mock_data_dir):
- # Doesn't use PropertyTester due to its unique test (no failure, needs --replace, etc.)
- name = "number_default"
- expected_file = os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE, name + ".json")
- # Cleanup from any potential previous failures
- if os.path.exists(expected_file):
- os.remove(expected_file)
- # No negative test here. First create w/o a value and ensure 42, then create with a value and ensure that value.
- ret = script_runner.run(
- "elyra-metadata",
- "install",
- METADATA_TEST_SCHEMASPACE,
- "--schema_name=metadata-test",
- "--name=" + name,
- "--display_name=" + name,
- "--required_test=required_value",
- )
- assert ret.success
- assert "Metadata instance '" + name + "' for schema 'metadata-test' has been written" in ret.stdout
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- assert os.path.isfile(expected_file)
- with open(expected_file, "r") as fd:
- instance_json = json.load(fd)
- assert instance_json["schema_name"] == "metadata-test"
- assert instance_json["display_name"] == name
- assert instance_json["metadata"]["number_default_test"] == 42
- # Note that we only include the properties that are changed, along with "identifiers" like name ans schema_name.
- ret = script_runner.run(
- "elyra-metadata",
- "install",
- METADATA_TEST_SCHEMASPACE,
- "--schema_name=metadata-test",
- "--name=" + name,
- "--replace",
- "--number_default_test=7.2",
- )
- assert ret.success
- assert "Metadata instance '" + name + "' for schema 'metadata-test' has been written" in ret.stdout
- assert os.path.isdir(os.path.join(mock_data_dir, "metadata", METADATA_TEST_SCHEMASPACE))
- assert os.path.isfile(expected_file)
- with open(expected_file, "r") as fd:
- instance_json = json.load(fd)
- assert instance_json["schema_name"] == "metadata-test"
- assert instance_json["display_name"] == name
- assert instance_json["metadata"]["number_default_test"] == 7.2
- def test_uri(script_runner, mock_data_dir):
- prop_test = PropertyTester("uri")
- prop_test.negative_value = "//invalid-uri"
- prop_test.negative_stdout = "'//invalid-uri' is not a 'uri'"
- # this can be joined with previous if adding meta-properties
- # "; title: URI Test, format: uri"
- prop_test.negative_stderr = "'//invalid-uri' is not a 'uri'"
- prop_test.positive_value = "http://localhost:31823/v1/models?version=2017-02-13"
- prop_test.run(script_runner, mock_data_dir)
- def test_integer_exclusivity(script_runner, mock_data_dir):
- prop_test = PropertyTester("integer_exclusivity")
- prop_test.negative_value = 3
- prop_test.negative_stdout = "3 is less than or equal to the minimum of 3"
- # this can be joined with previous if adding meta-properties
- # "; title: Integer Exclusivity Test, exclusiveMinimum: 3, exclusiveMaximum: 10"
- prop_test.negative_stderr = "3 is less than or equal to the minimum of 3"
- prop_test.positive_value = 7
- prop_test.run(script_runner, mock_data_dir)
- def test_integer_multiple(script_runner, mock_data_dir):
- prop_test = PropertyTester("integer_multiple")
- prop_test.negative_value = 32
- prop_test.negative_stdout = "32 is not a multiple of 6"
- # this can be joined with previous if adding meta-properties
- # "; title: Integer Multiple Test, multipleOf: 6"
- prop_test.negative_stderr = "32 is not a multiple of 6"
- prop_test.positive_value = 42
- prop_test.run(script_runner, mock_data_dir)
- def test_number_range(script_runner, mock_data_dir):
- prop_test = PropertyTester("number_range")
- prop_test.negative_value = 2.7
- prop_test.negative_stdout = "2.7 is less than the minimum of 3"
- # this can be joined with previous if adding meta-properties
- # "; title: Number Range Test, minimum: 3, maximum: 10"
- prop_test.negative_stderr = "2.7 is less than the minimum of 3"
- prop_test.positive_value = 7.2
- prop_test.run(script_runner, mock_data_dir)
- def test_const(script_runner, mock_data_dir):
- prop_test = PropertyTester("const")
- prop_test.negative_value = 2.718
- prop_test.negative_stdout = "3.14 was expected"
- # this can be joined with previous if adding meta-properties
- # " ; title: Const Test, const: 3.14"
- prop_test.negative_stderr = "3.14 was expected"
- prop_test.positive_value = 3.14
- prop_test.run(script_runner, mock_data_dir)
- def test_string_length(script_runner, mock_data_dir):
- prop_test = PropertyTester("string_length")
- prop_test.negative_value = "12345678901"
- prop_test.negative_stdout = "'12345678901' is too long"
- # this can be joined with previous if adding meta-properties
- # "; title: String Length Test, minLength: 3, maxLength: 10"
- prop_test.negative_stderr = "'12345678901' is too long"
- prop_test.positive_value = "123456"
- prop_test.run(script_runner, mock_data_dir)
- def test_string_pattern(script_runner, mock_data_dir):
- prop_test = PropertyTester("string_pattern") # Must start/end with alphanumeric, can include '-' and '.'
- prop_test.negative_value = "-foo1"
- prop_test.negative_stdout = "'-foo1' does not match '^[a-z0-9][a-z0-9-.]*[a-z0-9]$'"
- # this can be joined with previous if adding meta-properties
- # "; title: String Pattern Test, pattern: ^[a-z0-9][a-z0-9-.]*[a-z0-9]$"
- prop_test.negative_stderr = "'-foo1' does not match '^[a-z0-9][a-z0-9-.]*[a-z0-9]$'"
- prop_test.positive_value = "0foo-bar.com-01"
- prop_test.run(script_runner, mock_data_dir)
- def test_enum(script_runner, mock_data_dir):
- prop_test = PropertyTester("enum")
- prop_test.negative_value = "jupyter"
- prop_test.negative_stdout = "'jupyter' is not one of ['elyra', 'rocks', 'added']"
- # this can be joined with previous if adding meta-properties
- # "; title: Enum Test, enum: ['elyra', 'rocks', 'added']"
- prop_test.negative_stderr = "'jupyter' is not one of ['elyra', 'rocks', 'added']"
- prop_test.positive_value = "added"
- prop_test.run(script_runner, mock_data_dir)
- def test_array(script_runner, mock_data_dir):
- prop_test = PropertyTester("array")
- prop_test.negative_value = [1, 2, 2]
- prop_test.negative_stdout = "[1, 2, 2] has non-unique elements"
- # this can be joined with previous if adding meta-properties
- # "; title: Array Test, minItems: 3, maxItems: 10, uniqueItems: True"
- prop_test.negative_stderr = "[1, 2, 2] has non-unique elements"
- prop_test.positive_value = [1, 2, 3, 4, 5]
- prop_test.run(script_runner, mock_data_dir)
- def test_object(script_runner, mock_data_dir):
- prop_test = PropertyTester("object")
- prop_test.negative_value = {"prop1": 2, "prop2": 3}
- prop_test.negative_stdout = "{'prop1': 2, 'prop2': 3} does not have enough properties"
- # this can be joined with previous if adding meta-properties
- # "; title: Object Test, minProperties: 3, maxProperties: 10"
- prop_test.negative_stderr = "{'prop1': 2, 'prop2': 3} does not have enough properties"
- prop_test.positive_value = {"prop1": 2, "prop2": 3, "prop3": 4, "prop4": 5}
- prop_test.run(script_runner, mock_data_dir)
- def test_boolean(script_runner, mock_data_dir):
- prop_test = PropertyTester("boolean")
- prop_test.negative_value = "bogus_boolean"
- prop_test.negative_stdout = "'bogus_boolean' is not of type 'boolean'"
- # this can be joined with previous if adding meta-properties
- # "; title: Boolean Test"
- prop_test.negative_stderr = "'bogus_boolean' is not of type 'boolean'"
- prop_test.positive_value = True
- prop_test.run(script_runner, mock_data_dir)
- def test_null(script_runner, mock_data_dir):
- prop_test = PropertyTester("null")
- prop_test.negative_value = "bogus_null"
- prop_test.negative_stdout = "'bogus_null' is not of type 'null'"
- # this can be joined with previous if adding meta-properties
- # "; title: Null Test"
- prop_test.negative_stderr = "'bogus_null' is not of type 'null'"
- prop_test.positive_value = None
- prop_test.run(script_runner, mock_data_dir)
|