#!/usr/bin/env bash
# Submit spark_script_demo_1009.py to YARN in cluster mode, shipping a
# self-contained Python 3.7 env (py37.zip) so executors/AM use it instead
# of the cluster's system Python.
#
# Requires: SPARK_HOME pointing at a Spark installation.
set -euo pipefail

# JSON wiring passed through to the PySpark script as argv[1]/argv[2].
inputs='{"train_df": "train", "test_df": "test"}'
outputs='["results_tmp_11_10_0"]' # results_tmp_{task_id}_{sub_node_id}_{output_pin_id}

# Local path that holds an unpacked py37 env used for the client-side driver
# python setting below.
# NOTE(review): with --deploy-mode cluster the driver runs inside the YARN AM,
# so spark.pyspark.driver.python pointing at this *local* path only matters if
# the same path exists on cluster nodes — confirm this is intended.
sourceDIR=/home/sxkj/bigdata

# Fail early with a clear message rather than invoking "/bin/spark-submit".
: "${SPARK_HOME:?SPARK_HOME must be set to the Spark installation directory}"

"${SPARK_HOME}/bin/spark-submit" \
  --master yarn \
  --name "spark_demo_1009" \
  --deploy-mode cluster \
  --driver-memory 1g \
  --driver-cores 1 \
  --executor-memory 1g \
  --executor-cores 1 \
  --num-executors 1 \
  --archives hdfs://192.168.199.27:9000/tmp/lyl/py37.zip#python3env \
  --conf spark.default.parallelism=1 \
  --conf spark.executor.memoryOverhead=1g \
  --conf spark.driver.memoryOverhead=1g \
  --conf spark.yarn.maxAppAttempts=3 \
  --conf spark.yarn.submit.waitAppCompletion=true \
  --conf "spark.pyspark.driver.python=${sourceDIR}/py37/bin/python" \
  --conf spark.yarn.appMasterEnv.PYSPARK_PYTHON=python3env/py37/bin/python \
  --conf spark.pyspark.python=python3env/py37/bin/python \
  ./spark_script_demo_1009.py "$inputs" "$outputs"