idx (int64, 0-7.85k) | idx_lca (int64, 0-223) | offset (int64, 162-55k) | repo (string, 62 classes) | commit_hash (string, 113 classes) | target_file (string, 134 classes) | line_type_lca (string, 7 classes) | ground_truth (string, length 1-46) | in_completions (bool, 1 class) | completion_type (string, 6 classes) | non_dunder_count_intellij (int64, 0-529) | non_dunder_count_jedi (int64, 0-128) | start_with_ (bool, 2 classes) | first_occurrence (bool, 2 classes) | intellij_completions (list, length 1-532) | jedi_completions (list, length 3-148) | prefix (string, length 162-55k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
343 | 14 | 12,648 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | FORMAT_JDBC | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.
|
344 | 14 | 12,738 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_URL | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.
|
345 | 14 | 12,841 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_DRIVER | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.
|
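Every prefix in these rows exercises the same pattern: the test builds a chained-mock expectation for a Spark JDBC read followed by a format-specific GCS write. As a reading aid, here is a minimal sketch of the reader/writer chain those assert_called_with chains imply, covering only the fully-specified argument case. The constant names come from the completion lists in these rows and the argument keys from the parse_args test; whether FORMAT_PRQT holds "parquet", and whether the CSV header constant is HEADER or CSV_HEADER, are assumptions, and the real JDBCToGCSTemplate.run may differ.

from pyspark.sql import SparkSession, DataFrame

import dataproc_templates.util.template_constants as constants


def run_sketch(spark: SparkSession, args: dict) -> None:
    """Hypothetical reconstruction of the chain the mock assertions describe."""
    # JDBC read: format(FORMAT_JDBC), one option() per connection/partition setting, then load().
    input_data: DataFrame = (
        spark.read.format(constants.FORMAT_JDBC)
        .option(constants.JDBC_URL, args["jdbctogcs.input.url"])
        .option(constants.JDBC_DRIVER, args["jdbctogcs.input.driver"])
        .option(constants.JDBC_TABLE, args["jdbctogcs.input.table"])
        .option(constants.JDBC_PARTITIONCOLUMN, args["jdbctogcs.input.partitioncolumn"])
        .option(constants.JDBC_LOWERBOUND, args["jdbctogcs.input.lowerbound"])
        .option(constants.JDBC_UPPERBOUND, args["jdbctogcs.input.upperbound"])
        .option(constants.JDBC_NUMPARTITIONS, args["jdbctogcs.numpartitions"])
        .load()
    )

    # GCS write: mode() first, then a format-specific terminal call, exactly as asserted.
    writer = input_data.write.mode(args["jdbctogcs.output.mode"])
    output_format = args["jdbctogcs.output.format"]
    output_location = args["jdbctogcs.output.location"]

    if output_format == constants.FORMAT_PRQT:      # assumed to hold the string "parquet"
        writer.parquet(output_location)
    elif output_format == constants.FORMAT_AVRO:
        writer.format(constants.FORMAT_AVRO).save(output_location)
    elif output_format == constants.FORMAT_CSV:
        # The tests reference CSV_HEADER, while the completion lists only show HEADER,
        # so the exact constant name here is an assumption.
        writer.option(constants.HEADER, True).csv(output_location)
    elif output_format == constants.FORMAT_JSON:
        writer.json(output_location)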
idx: 346
idx_lca: 14
offset: 12,959
repo: googlecloudplatform__dataproc-templates
commit_hash: d62560011b069690d01cf2db563788bf81029623
target_file: python/test/jdbc/test_jdbc_to_gcs.py
line_type_lca: Unknown
ground_truth: JDBC_TABLE
in_completions: true
completion_type: statement
non_dunder_count_intellij: 103
non_dunder_count_jedi: 103
start_with_: false
first_occurrence: false
intellij_completions:
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
prefix:
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.
|
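The prefix cell above stops at "constants." and this row's ground_truth (JDBC_TABLE) is the token expected at that cut; earlier tests in the same file spell the full line out as assert_called_with(constants.JDBC_TABLE, "table1"). The snippet below is a small, self-contained illustration of why such chained-mock assertions work: every attribute access or call on a MagicMock yields another MagicMock, so the third .option in the chain can be asserted on its own. The literal "dbtable" merely stands in for the assumed value of constants.JDBC_TABLE.

from unittest import mock

# Each attribute access and call on a MagicMock returns another MagicMock, so
# read.format().option().option().option addresses the third option() in the chain.
mock_spark_session = mock.MagicMock()
mock_spark_session.read.format().option().option().option("dbtable", "table1")  # "dbtable": assumed JDBC_TABLE value
mock_spark_session.read.format().option().option().option.assert_called_with("dbtable", "table1")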
idx: 347
idx_lca: 14
offset: 13,085
repo: googlecloudplatform__dataproc-templates
commit_hash: d62560011b069690d01cf2db563788bf81029623
target_file: python/test/jdbc/test_jdbc_to_gcs.py
line_type_lca: Unknown
ground_truth: JDBC_NUMPARTITIONS
in_completions: true
completion_type: statement
non_dunder_count_intellij: 103
non_dunder_count_jedi: 103
start_with_: false
first_occurrence: false
intellij_completions:
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
prefix:
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.
|
idx: 348
idx_lca: 14
offset: 13,287
repo: googlecloudplatform__dataproc-templates
commit_hash: d62560011b069690d01cf2db563788bf81029623
target_file: python/test/jdbc/test_jdbc_to_gcs.py
line_type_lca: Unknown
ground_truth: OUTPUT_MODE_APPEND
in_completions: true
completion_type: statement
non_dunder_count_intellij: 103
non_dunder_count_jedi: 103
start_with_: false
first_occurrence: false
intellij_completions:
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
prefix:
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.
|
349 | 14 | 13,801 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
|
350 | 14 | 14,335 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | run | true | function | 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
|
351 | 14 | 14,445 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | FORMAT_JDBC | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.
|
352 | 14 | 14,535 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_URL | true | statement | 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.
|
353 | 14 | 14,638 |
googlecloudplatform__dataproc-templates
|
d62560011b069690d01cf2db563788bf81029623
|
python/test/jdbc/test_jdbc_to_gcs.py
|
Unknown
|
JDBC_DRIVER
| true |
statement
| 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.
|
354 | 14 | 14,756 |
googlecloudplatform__dataproc-templates
|
d62560011b069690d01cf2db563788bf81029623
|
python/test/jdbc/test_jdbc_to_gcs.py
|
Unknown
|
JDBC_TABLE
| true |
statement
| 103 | 103 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.
|
355 | 14 | 14,882 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_NUMPARTITIONS | true | statement | 103 | 103 | false | false
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.
|
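The chained .option() assertions in the JDBC rows above imply a particular read path. The following is a minimal illustrative sketch only — it assumes the constant names from dataproc_templates.util.template_constants as they appear in the test code, and the helper name and signature are invented for illustration; it is not the repository's actual JDBCToGCSTemplate.run implementation.

from pyspark.sql import DataFrame, SparkSession

import dataproc_templates.util.template_constants as constants


def read_jdbc_input(spark: SparkSession, args: dict) -> DataFrame:
    # Reader chain in the same order the mocks assert:
    # url, driver, table, then the optional partitioning options, then numPartitions.
    reader = spark.read.format(constants.FORMAT_JDBC) \
        .option(constants.JDBC_URL, args["jdbctogcs.input.url"]) \
        .option(constants.JDBC_DRIVER, args["jdbctogcs.input.driver"]) \
        .option(constants.JDBC_TABLE, args["jdbctogcs.input.table"])
    if args.get("jdbctogcs.input.partitioncolumn"):
        reader = reader \
            .option(constants.JDBC_PARTITIONCOLUMN, args["jdbctogcs.input.partitioncolumn"]) \
            .option(constants.JDBC_LOWERBOUND, args["jdbctogcs.input.lowerbound"]) \
            .option(constants.JDBC_UPPERBOUND, args["jdbctogcs.input.upperbound"])
    # test_run_pass_args6 asserts "10" when --jdbctogcs.numpartitions is omitted,
    # so a default of "10" is assumed here.
    return reader.option(constants.JDBC_NUMPARTITIONS,
                         args.get("jdbctogcs.numpartitions", "10")).load()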
356 | 14 | 15,084 | googlecloudplatform__dataproc-templates | d62560011b069690d01cf2db563788bf81029623 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | OUTPUT_MODE_APPEND | true | statement | 103 | 103 | false | false
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=avro",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write.mode().format().save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args4(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write csv"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args5(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write json"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=json",
"--jdbctogcs.output.mode=ignore"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
#mock_spark_session.dataframe.DataFrame.write.mode().json.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args6(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write.mode().option.assert_called_once_with(constants.CSV_HEADER, True)
mock_spark_session.dataframe.DataFrame.write.mode().option().csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args7(self, mock_spark_session):
"""Tests JDBCToGCSTemplate pass args"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
mock_spark_session.read.format().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "10")
mock_spark_session.read.format().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.
|
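The write-side assertions in the rows above (parquet writer, avro via format().save(), csv with the CSV_HEADER option, json) suggest a per-format dispatch. The sketch below is an assumption made for illustration — the function name and signature are hypothetical and the constant names are taken only as used in the test code shown above.

from pyspark.sql import DataFrame

import dataproc_templates.util.template_constants as constants


def write_output(df: DataFrame, output_format: str, output_mode: str, location: str) -> None:
    # Mode is set first, then one writer call per output format,
    # mirroring the assertion chains in the test rows.
    writer = df.write.mode(output_mode)
    if output_format == constants.FORMAT_PRQT:
        writer.parquet(location)                                  # see test_run_pass_args2
    elif output_format == constants.FORMAT_AVRO:
        writer.format(constants.FORMAT_AVRO).save(location)      # see test_run_pass_args3
    elif output_format == constants.FORMAT_CSV:
        writer.option(constants.CSV_HEADER, True).csv(location)  # see test_run_pass_args4
    elif output_format == constants.FORMAT_JSON:
        writer.json(location)                                    # see test_run_pass_args5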
357 | 15 | 1,021 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | true
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.
|
358 | 15 | 2,055 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
|
359 | 15 | 2,559 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | common | run | true | function | 4 | 4 | false | true
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
|
360 | 15 | 2,685 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | FORMAT_MONGO | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"FORMAT_AVRO",
"MONGO_COLLECTION",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.
|
361 | 15 | 2,830 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | random | MONGO_DATABASE | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
|
362 | 15 | 3,012 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_COLLECTION | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.
|
363 | 15 | 3,319 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | OUTPUT_MODE_OVERWRITE | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_IGNORE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.
|
364 | 15 | 3,749 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
| 365 | 15 | 4,247 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | run | true | function | 4 | 4 | false | false
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
| 366 | 15 | 4,373 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | FORMAT_MONGO | true | statement | 123 | 123 | false | false
[
"FORMAT_MONGO",
"FORMAT_AVRO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.
| 367 | 15 | 4,518 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_DATABASE | true | statement | 123 | 123 | false | false
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"FORMAT_AVRO",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
| 368 | 15 | 4,700 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_COLLECTION | true | statement | 123 | 123 | false | false
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"FORMAT_AVRO",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.
| 369 | 15 | 5,007 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | OUTPUT_MODE_APPEND | true | statement | 123 | 123 | false | true
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"FORMAT_AVRO",
"OUTPUT_MODE_IGNORE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.
|
370 | 15 | 5,158 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
Unknown
|
FORMAT_AVRO
| true |
statement
| 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.
|
371 | 15 | 5,597 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
inproject
|
parse_args
| true |
function
| 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
|
372 | 15 | 6,094 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
Unknown
|
run
| true |
function
| 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
|
373 | 15 | 6,220 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
Unknown
|
FORMAT_MONGO
| true |
statement
| 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"FORMAT_AVRO",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.
|
374 | 15 | 6,365 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
Unknown
|
MONGO_DATABASE
| true |
statement
| 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
|
375 | 15 | 6,547 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_COLLECTION | true | statement | 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.
|
376 | 15 | 6,854 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | OUTPUT_MODE_IGNORE | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_OVERWRITE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.
|
377 | 15 | 7,005 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | HEADER | true | statement | 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"FORMAT_AVRO",
"OUTPUT_MODE_IGNORE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.
|
378 | 15 | 7,446 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.
|
379 | 15 | 7,951 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | run | true | function | 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=json",
"--mongo.gcs.output.mode=errorifexists",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.
|
380 | 15 | 8,077 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | FORMAT_MONGO | true | statement | 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"FORMAT_AVRO",
"MONGO_COLLECTION",
"HEADER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=json",
"--mongo.gcs.output.mode=errorifexists",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.
|
381 | 15 | 8,222 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_DATABASE | true | statement | 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=json",
"--mongo.gcs.output.mode=errorifexists",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.
|
382 | 15 | 8,404 | googlecloudplatform__dataproc-templates | 8b291caed55509d56d36f03c1c25762350b6f905 | python/test/mongo/test_mongo_to_gcs.py | Unknown | MONGO_COLLECTION | true | statement | 123 | 123 | false | false |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"HEADER",
"FORMAT_AVRO",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=json",
"--mongo.gcs.output.mode=errorifexists",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.
|
383 | 15 | 8,711 |
googlecloudplatform__dataproc-templates
|
8b291caed55509d56d36f03c1c25762350b6f905
|
python/test/mongo/test_mongo_to_gcs.py
|
Unknown
|
OUTPUT_MODE_ERRORIFEXISTS
| true |
statement
| 123 | 123 | false | true |
[
"FORMAT_MONGO",
"MONGO_DATABASE",
"MONGO_COLLECTION",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_IGNORE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HEADER",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_GCS_INPUT_COLLECTION",
"MONGO_GCS_INPUT_DATABASE",
"MONGO_GCS_INPUT_URI",
"MONGO_GCS_OUTPUT_FORMAT",
"MONGO_GCS_OUTPUT_LOCATION",
"MONGO_GCS_OUTPUT_MODE",
"MONGO_INPUT_URI",
"MONGO_URL",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "MONGO_GCS_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "MONGO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "MONGO_INPUT_URI",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.mongo.mongo_to_gcs import MongoToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestMongoToGCSTemplate:
"""
Test suite for MongoToGCSTemplate
"""
def test_parse_args(self):
"""Tests MongoToGCSTemplate.parse_args()"""
mongo_to_gcs_template = MongoToGCSTemplate()
parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
assert parsed_args["mongo.gcs.input.uri"] == "mongodb://host:port"
assert parsed_args["mongo.gcs.input.database"] == "database"
assert parsed_args["mongo.gcs.input.collection"] == "collection"
assert parsed_args["mongo.gcs.output.format"] == "parquet"
assert parsed_args["mongo.gcs.output.mode"] == "overwrite"
assert parsed_args["mongo.gcs.output.location"] == "gs://test"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for parquet format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=parquet",
"--mongo.gcs.output.mode=overwrite",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for avro format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=avro",
"--mongo.gcs.output.mode=append",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_APPEND)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.format() \
.save.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for csv format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=csv",
"--mongo.gcs.output.mode=ignore",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_IGNORE)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.dataframe.DataFrame.write \
.mode() \
.option() \
.csv.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests MongoToGCSTemplate runs for json format output"""
mongo_to_gcs_template = MongoToGCSTemplate()
mock_parsed_args = mongo_to_gcs_template.parse_args(
["--mongo.gcs.input.uri=mongodb://host:port",
"--mongo.gcs.input.database=database",
"--mongo.gcs.input.collection=collection",
"--mongo.gcs.output.format=json",
"--mongo.gcs.output.mode=errorifexists",
"--mongo.gcs.output.location=gs://test"])
mock_spark_session.read.format().option().option().option().load.return_value \
= mock_spark_session.dataframe.DataFrame
mongo_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read \
.format.assert_called_with(constants.FORMAT_MONGO)
mock_spark_session.read \
.format() \
.option() \
.option.assert_called_with(constants.MONGO_DATABASE,"database")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option.assert_called_with(constants.MONGO_COLLECTION,"collection")
mock_spark_session.read \
.format() \
.option() \
.option() \
.option() \
.load.assert_called_with()
mock_spark_session.dataframe.DataFrame.write \
.mode.assert_called_once_with(constants.
|
384 | 16 | 990 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
inproject
|
parse_args
| true |
function
| 4 | 4 | false | true |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.
|
385 | 16 | 2,259 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
parse_args
| true |
function
| 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
|
386 | 16 | 2,834 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
run
| true |
function
| 4 | 4 | false | true |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
|
387 | 16 | 3,223 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
OUTPUT_MODE_OVERWRITE
| true |
statement
| 124 | 124 | false | true |
[
"OUTPUT_MODE_OVERWRITE",
"HEADER",
"FORMAT_AVRO",
"FORMAT_CSV",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.
|
388 | 16 | 3,797 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
inproject
|
parse_args
| true |
function
| 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToBigQueryTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
|
389 | 16 | 4,360 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | Unknown | run | true | function | 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
|
390 | 16 | 4,476 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | Unknown | FORMAT_CSV | true | statement | 124 | 124 | false | true |
[
"FORMAT_AVRO",
"HEADER",
"OUTPUT_MODE_OVERWRITE",
"INFER_SCHEMA",
"FORMAT_CSV",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.
|
391 | 16 | 4,585 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | Unknown | HEADER | true | statement | 124 | 124 | false | true |
[
"FORMAT_CSV",
"OUTPUT_MODE_OVERWRITE",
"FORMAT_AVRO",
"HEADER",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.
|
392 | 16 | 4,720 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | Unknown | INFER_SCHEMA | true | statement | 124 | 124 | false | true |
[
"HEADER",
"FORMAT_CSV",
"OUTPUT_MODE_OVERWRITE",
"FORMAT_AVRO",
"GCS_TO_GCS_INPUT_FORMAT",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INFER_SCHEMA",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.
|
393 | 16 | 5,252 | googlecloudplatform__dataproc-templates | 49e82f63f0e49578ce6451902da57a095bc02b5d | python/test/gcs/test_gcs_to_gcs.py | Unknown | OUTPUT_MODE_OVERWRITE | true | statement | 124 | 124 | false | false |
[
"OUTPUT_MODE_OVERWRITE",
"HEADER",
"FORMAT_CSV",
"FORMAT_AVRO",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.
|
394 | 16 | 5,543 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
HEADER
| true |
statement
| 124 | 124 | false | false |
[
"FORMAT_CSV",
"OUTPUT_MODE_OVERWRITE",
"FORMAT_AVRO",
"HEADER",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.
|
395 | 16 | 6,020 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
parse_args
| true |
function
| 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
|
396 | 16 | 6,585 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
common
|
run
| true |
function
| 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
|
397 | 16 | 6,701 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
FORMAT_AVRO
| true |
statement
| 124 | 124 | false | true |
[
"FORMAT_AVRO",
"FORMAT_CSV",
"HEADER",
"OUTPUT_MODE_OVERWRITE",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.
|
398 | 16 | 7,131 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
OUTPUT_MODE_OVERWRITE
| true |
statement
| 124 | 124 | false | false |
[
"FORMAT_AVRO",
"OUTPUT_MODE_OVERWRITE",
"HEADER",
"FORMAT_CSV",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.read.format() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.
|
399 | 16 | 7,422 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
FORMAT_AVRO
| true |
statement
| 124 | 124 | false | false |
[
"FORMAT_AVRO",
"FORMAT_CSV",
"HEADER",
"OUTPUT_MODE_OVERWRITE",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.read.format() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format.assert_called_once_with(constants.
|
400 | 16 | 7,887 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
parse_args
| true |
function
| 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.read.format() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format() \
.save.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.
|
401 | 16 | 8,453 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
run
| true |
function
| 4 | 4 | false | false |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.read.format() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format() \
.save.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=json",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=json",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.json.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.
|
402 | 16 | 8,836 |
googlecloudplatform__dataproc-templates
|
49e82f63f0e49578ce6451902da57a095bc02b5d
|
python/test/gcs/test_gcs_to_gcs.py
|
Unknown
|
OUTPUT_MODE_OVERWRITE
| true |
statement
| 124 | 124 | false | false |
[
"OUTPUT_MODE_OVERWRITE",
"HEADER",
"FORMAT_AVRO",
"FORMAT_CSV",
"INFER_SCHEMA",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"COMPRESSION_BZIP2",
"COMPRESSION_DEFLATE",
"COMPRESSION_GZIP",
"COMPRESSION_LZ4",
"COMPRESSION_NONE",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_HBASE",
"FORMAT_JDBC",
"FORMAT_JSON",
"FORMAT_MONGO",
"FORMAT_PRQT",
"FORMAT_TXT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"GCS_MONGO_BATCH_SIZE",
"GCS_MONGO_INPUT_FORMAT",
"GCS_MONGO_INPUT_LOCATION",
"GCS_MONGO_OUTPUT_COLLECTION",
"GCS_MONGO_OUTPUT_DATABASE",
"GCS_MONGO_OUTPUT_MODE",
"GCS_MONGO_OUTPUT_URI",
"GCS_TO_GCS_INPUT_FORMAT",
"GCS_TO_GCS_INPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_FORMAT",
"GCS_TO_GCS_OUTPUT_LOCATION",
"GCS_TO_GCS_OUTPUT_MODE",
"GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"GCS_TO_GCS_SQL_QUERY",
"GCS_TO_GCS_TEMP_VIEW_NAME",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"INPUT_COMPRESSION",
"INPUT_DELIMITER",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_TABLE",
"JDBC_UPPERBOUND",
"JDBC_URL",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"MONGO_BATCH_SIZE",
"MONGO_COLLECTION",
"MONGO_DATABASE",
"MONGO_DEFAULT_BATCH_SIZE",
"MONGO_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"TEXT_BQ_INPUT_INFERSCHEMA",
"TEXT_BQ_INPUT_LOCATION",
"TEXT_BQ_LD_TEMP_BUCKET_NAME",
"TEXT_BQ_OUTPUT_DATASET",
"TEXT_BQ_OUTPUT_MODE",
"TEXT_BQ_OUTPUT_TABLE",
"TEXT_BQ_TEMP_BUCKET",
"TEXT_INPUT_COMPRESSION",
"TEXT_INPUT_DELIMITER"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "COMPRESSION_BZIP2",
"type": "statement"
},
{
"name": "COMPRESSION_DEFLATE",
"type": "statement"
},
{
"name": "COMPRESSION_GZIP",
"type": "statement"
},
{
"name": "COMPRESSION_LZ4",
"type": "statement"
},
{
"name": "COMPRESSION_NONE",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_MONGO",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "FORMAT_TXT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "GCS_MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_MONGO_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_COLLECTION",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_DATABASE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_MONGO_OUTPUT_URI",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_TO_GCS_OUTPUT_PARTITION_COLUMN",
"type": "statement"
},
{
"name": "GCS_TO_GCS_SQL_QUERY",
"type": "statement"
},
{
"name": "GCS_TO_GCS_TEMP_VIEW_NAME",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HEADER",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "INFER_SCHEMA",
"type": "statement"
},
{
"name": "INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "INPUT_DELIMITER",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "MONGO_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_COLLECTION",
"type": "statement"
},
{
"name": "MONGO_DATABASE",
"type": "statement"
},
{
"name": "MONGO_DEFAULT_BATCH_SIZE",
"type": "statement"
},
{
"name": "MONGO_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_INFERSCHEMA",
"type": "statement"
},
{
"name": "TEXT_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "TEXT_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "TEXT_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "TEXT_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "TEXT_INPUT_COMPRESSION",
"type": "statement"
},
{
"name": "TEXT_INPUT_DELIMITER",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from google.cloud import storage
from dataproc_templates.gcs.gcs_to_gcs import GCSToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestGCSToGCSTemplate:
"""
Test suite for GCSToGCSTemplate
"""
def test_parse_args(self):
gcs_to_gcs_template = GCSToGCSTemplate()
parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
assert parsed_args["gcs.to.gcs.input.location"] == "gs://input"
assert parsed_args["gcs.to.gcs.input.format"] == "csv"
assert parsed_args["gcs.to.gcs.temp.view.name"] == "temp"
assert parsed_args["gcs.to.gcs.sql.query"] == "select * from temp"
assert parsed_args["gcs.to.gcs.output.format"] == "csv"
assert parsed_args["gcs.to.gcs.output.mode"] == "overwrite"
assert parsed_args["gcs.to.gcs.output.partition.column"] == "column"
assert parsed_args["gcs.to.gcs.output.location"] == "gs://output"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_parquet(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=parquet",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=parquet",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.parquet.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.parquet.assert_called_once_with("gs://input")
mock_spark_session.read.parquet().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.parquet.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_csv(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=csv",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=csv",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_CSV)
mock_spark_session.read.format() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.read.format() \
.option() \
.option.assert_called_once_with(constants.INFER_SCHEMA, True)
mock_spark_session.read.format() \
.option() \
.option() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.option() \
.option() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option.assert_called_once_with(constants.HEADER, True)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.option() \
.csv.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_avro(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=avro",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=avro",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.csv.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.read.format() \
.load.assert_called_once_with("gs://input")
mock_spark_session.read.format() \
.load() \
.createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.sql().write \
.mode() \
.partitionBy.assert_called_once_with("column")
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format.assert_called_once_with(constants.FORMAT_AVRO)
mock_spark_session.sql().write \
.mode() \
.partitionBy() \
.format() \
.save.assert_called_once_with("gs://output")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_json(self, mock_spark_session):
"""Tests GCSToBigqueryTemplate runs with parquet format"""
gcs_to_gcs_template = GCSToGCSTemplate()
mock_parsed_args = gcs_to_gcs_template.parse_args(
["--gcs.to.gcs.input.location=gs://input",
"--gcs.to.gcs.input.format=json",
"--gcs.to.gcs.temp.view.name=temp",
"--gcs.to.gcs.sql.query=select * from temp",
"--gcs.to.gcs.output.format=json",
"--gcs.to.gcs.output.mode=overwrite",
"--gcs.to.gcs.output.partition.column=column",
"--gcs.to.gcs.output.location=gs://output"])
mock_spark_session.read.json.return_value = mock_spark_session.dataframe.DataFrame
gcs_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.json.assert_called_once_with("gs://input")
mock_spark_session.read.json().createOrReplaceTempView.assert_called_once_with("temp")
mock_spark_session.sql.assert_called_once_with("select * from temp")
mock_spark_session.sql().write \
.mode.assert_called_once_with(constants.
|
403 | 17 | 917 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | commited | __init__ | true | function | 8 | 8 | true | true |
[
"ctx",
"indent",
"uri",
"verbose",
"soma_options",
"exists",
"name",
"object_type",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().
|
407 | 17 | 1,422 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | true |
[
"verbose",
"attr_name",
"col_dim_name",
"row_dim_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.
|
408 | 17 | 1,452 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | get_start_stamp | true | function | 6 | 12 | false | true |
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.
|
409 | 17 | 1,496 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | true |
[
"uri",
"indent",
"attr_name",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.
|
410 | 17 | 1,524 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | uri | true | statement | 17 | 17 | false | true |
[
"indent",
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.
|
411 | 17 | 1,548 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | exists | true | function | 17 | 17 | false | true |
[
"verbose",
"attr_name",
"col_dim_name",
"row_dim_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.
|
412 | 17 | 1,578 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false |
[
"verbose",
"attr_name",
"col_dim_name",
"row_dim_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.
|
413 | 17 | 1,617 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | false |
[
"uri",
"attr_name",
"indent",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.
|
414 | 17 | 1,654 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | uri | true | statement | 17 | 17 | false | false |
[
"indent",
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.
|
415 | 17 | 1,692 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | infile | create_empty_array | true | function | 17 | 17 | false | true |
[
"attr_name",
"col_dim_name",
"row_dim_name",
"ingest_data_rows_chunked",
"ingest_data_whole",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.
|
416 | 17 | 1,752 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | infile | ingest_data | true | function | 17 | 17 | false | true |
[
"row_dim_name",
"attr_name",
"col_dim_name",
"uri",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.
|
417 | 17 | 1,810 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false |
[
"verbose",
"attr_name",
"col_dim_name",
"row_dim_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.
|
418 | 17 | 1,842 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | format_elapsed | true | function | 6 | 12 | false | true |
[
"format_elapsed",
"get_start_stamp",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.
|
419 | 17 | 1,868 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | indent | true | statement | 17 | 17 | false | false |
[
"uri",
"indent",
"attr_name",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.
|
420 | 17 | 1,896 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | uri | true | statement | 17 | 17 | false | false |
[
"indent",
"uri",
"attr_name",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.
| 421 | 17 | 2,176 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | soma_options | true | statement | 17 | 17 | false | true |
[
"uri",
"col_dim_name",
"row_dim_name",
"attr_name",
"from_matrix",
"__init__",
"create_empty_array",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.
| 422 | 17 | 2,189 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | string_dim_zstd_level | true | statement | 8 | 8 | false | true |
[
"X_capacity",
"X_cell_order",
"X_tile_order",
"goal_chunk_nnz",
"write_X_chunked_if_csr",
"obs_extent",
"string_dim_zstd_level",
"var_extent",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.
| 423 | 17 | 2,273 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | row_dim_name | true | statement | 17 | 17 | false | true |
[
"attr_name",
"col_dim_name",
"row_dim_name",
"name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.
| 424 | 17 | 2,387 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | random | col_dim_name | true | statement | 17 | 17 | false | true |
[
"attr_name",
"row_dim_name",
"col_dim_name",
"name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.
| 425 | 17 | 2,501 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | ctx | true | statement | 17 | 17 | false | true |
[
"ctx",
"col_dim_name",
"row_dim_name",
"soma_options",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.
| 426 | 17 | 2,547 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | attr_name | true | statement | 17 | 17 | false | true |
[
"attr_name",
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.
| 427 | 17 | 2,618 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | ctx | true | statement | 17 | 17 | false | false |
[
"ctx",
"attr_name",
"col_dim_name",
"row_dim_name",
"soma_options",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.
| 428 | 17 | 2,908 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | soma_options | true | statement | 17 | 17 | false | false |
[
"col_dim_name",
"row_dim_name",
"uri",
"attr_name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.
| 429 | 17 | 2,921 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | X_capacity | true | statement | 8 | 8 | false | true |
[
"X_capacity",
"X_cell_order",
"X_tile_order",
"string_dim_zstd_level",
"goal_chunk_nnz",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.
| 430 | 17 | 2,961 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | soma_options | true | statement | 17 | 17 | false | false |
[
"col_dim_name",
"row_dim_name",
"uri",
"attr_name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.
| 431 | 17 | 2,974 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | X_cell_order | true | statement | 8 | 8 | false | true |
[
"X_tile_order",
"X_cell_order",
"X_capacity",
"string_dim_zstd_level",
"goal_chunk_nnz",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.
| 432 | 17 | 3,016 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | soma_options | true | statement | 17 | 17 | false | false |
[
"col_dim_name",
"row_dim_name",
"uri",
"attr_name",
"ctx",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.
|
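The record above (idx 432) pairs a prefix that stops at "tile_order=self." with the ground-truth completion "soma_options" and two candidate lists, one from each completion engine. Below is a minimal scoring sketch for such a record; the dict layout, the shortened candidate lists, and the helper name are illustrative assumptions rather than part of the dataset tooling.

# Scoring sketch for one completion record (hypothetical in-memory layout).
# The candidate lists are truncated copies of the record above.
record = {
    "ground_truth": "soma_options",
    "intellij_completions": ["col_dim_name", "row_dim_name", "uri", "attr_name", "soma_options"],
    "jedi_completions": [{"name": "attr_name", "type": "statement"},
                         {"name": "soma_options", "type": "statement"}],
}

def rank_of(target, names):
    # 1-based rank of the ground truth in a candidate list, or None if absent.
    return names.index(target) + 1 if target in names else None

intellij_rank = rank_of(record["ground_truth"], record["intellij_completions"])
jedi_rank = rank_of(record["ground_truth"], [c["name"] for c in record["jedi_completions"]])
print(intellij_rank, jedi_rank)  # ranks within this toy record only: 5 2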
433 | 17 | 3,029 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
X_tile_order
| true |
statement
| 8 | 8 | false | true |
[
"X_cell_order",
"X_tile_order",
"X_capacity",
"string_dim_zstd_level",
"goal_chunk_nnz",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.
|
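The candidate lists in the record above enumerate the SOMAOptions fields the prefix reads: X_capacity, X_cell_order, X_tile_order, string_dim_zstd_level, goal_chunk_nnz, obs_extent, var_extent, and write_X_chunked_if_csr. A rough stand-in showing how those fields hang together is sketched below; the types and default values are assumptions for illustration, not tiledbsc's actual SOMAOptions definition.

from dataclasses import dataclass

@dataclass
class SOMAOptionsSketch:
    # Hypothetical stand-in for tiledbsc's SOMAOptions; defaults are guesses.
    X_capacity: int = 100_000            # tile capacity handed to ArraySchema
    X_cell_order: str = "row-major"      # cell_order handed to ArraySchema
    X_tile_order: str = "row-major"      # tile_order handed to ArraySchema
    string_dim_zstd_level: int = 3       # Zstd level for the string dimensions
    write_X_chunked_if_csr: bool = True  # route CSR input through the chunked ingest path
    goal_chunk_nnz: int = 10_000_000     # target non-zeros per chunk when ingesting in pieces
    obs_extent: int = 256                # extent hints; exact meaning is assumed here
    var_extent: int = 2048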
434 | 17 | 3,064 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
ctx
| true |
statement
| 17 | 17 | false | false |
[
"ctx",
"soma_options",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.
|
435 | 17 | 3,112 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
common
|
uri
| true |
statement
| 17 | 17 | false | false |
[
"attr_name",
"uri",
"indent",
"col_dim_name",
"row_dim_name",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.
|
436 | 17 | 3,131 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
common
|
ctx
| true |
statement
| 17 | 17 | false | false |
[
"ctx",
"uri",
"col_dim_name",
"row_dim_name",
"soma_options",
"__init__",
"attr_name",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.
|
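After create_empty_array has run (the prefix above ends inside the closing tiledb.Array.create call), the written schema can be inspected to confirm the layout: a 2D sparse array with two string dimensions and a single value attribute. A small inspection sketch, assuming "uri" points at an array created that way:

import tiledb

uri = "/tmp/assay_matrix_demo"  # assumed location of an already-created array
with tiledb.open(uri) as A:
    schema = A.schema
    print(schema.sparse)  # True: the array is sparse
    print([schema.domain.dim(i).name for i in range(schema.domain.ndim)])  # the two string dims
    print(schema.attr(0).name)  # the single attribute, "value" in the code above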
437 | 17 | 3,387 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
soma_options
| true |
statement
| 17 | 17 | false | false |
[
"to_csr_matrix",
"from_matrix",
"col_dim_name",
"row_dim_name",
"attr_name",
"__init__",
"create_empty_array",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.
|
438 | 17 | 3,400 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
random
|
write_X_chunked_if_csr
| true |
statement
| 8 | 8 | false | true |
[
"X_capacity",
"X_cell_order",
"X_tile_order",
"goal_chunk_nnz",
"string_dim_zstd_level",
"obs_extent",
"var_extent",
"write_X_chunked_if_csr",
"__init__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "goal_chunk_nnz",
"type": "statement"
},
{
"name": "obs_extent",
"type": "statement"
},
{
"name": "string_dim_zstd_level",
"type": "statement"
},
{
"name": "var_extent",
"type": "statement"
},
{
"name": "write_X_chunked_if_csr",
"type": "statement"
},
{
"name": "X_capacity",
"type": "statement"
},
{
"name": "X_cell_order",
"type": "statement"
},
{
"name": "X_tile_order",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__bool__",
"type": "instance"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__ge__",
"type": "instance"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__gt__",
"type": "instance"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__le__",
"type": "instance"
},
{
"name": "__lt__",
"type": "instance"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasshook__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.
|
439 | 17 | 3,441 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
infile
|
ingest_data_rows_chunked
| true |
function
| 17 | 17 | false | true |
[
"attr_name",
"ingest_data_whole",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"__init__",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.
|
440 | 17 | 3,527 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
infile
|
ingest_data_whole
| true |
function
| 17 | 17 | false | true |
[
"ingest_data_rows_chunked",
"attr_name",
"col_dim_name",
"row_dim_name",
"create_empty_array",
"__init__",
"from_matrix",
"ingest_data",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.
|
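Records 437-440 walk through the dispatch inside ingest_data: CSR input goes to ingest_data_rows_chunked when write_X_chunked_if_csr is set, everything else to ingest_data_whole. A hedged end-to-end usage sketch of the class follows; the import path and URI are assumptions, the parent-group wiring a real SOMA hierarchy would provide is omitted, and the row/column names are numpy string arrays because ingest_data_whole fancy-indexes them with the COO row/col indices.

import numpy as np
import scipy.sparse
from tiledbsc.assay_matrix import AssayMatrix  # assumed import path

obs_ids = np.asarray(["cell0", "cell1", "cell2"])
var_ids = np.asarray(["geneA", "geneB"])
X = scipy.sparse.csr_matrix(np.array([[1.0, 0.0],
                                      [0.0, 2.0],
                                      [3.0, 0.0]]))

am = AssayMatrix(
    uri="/tmp/soma_demo/X/data",  # illustrative URI
    name="data",
    row_dim_name="obs_id",
    col_dim_name="var_id",
)
am.from_matrix(X, obs_ids, var_ids)  # creates the array if needed, then ingests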
441 | 17 | 4,242 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
uri
| true |
statement
| 17 | 17 | false | false |
[
"attr_name",
"uri",
"col_dim_name",
"row_dim_name",
"indent",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.
|
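The ingest_data_whole body in the prefix above turns the input into COO form and maps the integer row/col indices onto the string dimension values before writing. The same step on a toy 2x2 matrix, with illustrative names:

import numpy as np
import scipy.sparse

row_names = np.asarray(["obs0", "obs1"])
col_names = np.asarray(["var0", "var1"])
matrix = np.array([[5.0, 0.0],
                   [0.0, 7.0]])

mat_coo = scipy.sparse.coo_matrix(matrix)  # keeps only the non-zero cells
d0 = row_names[mat_coo.row]                # string coordinate for each non-zero row
d1 = col_names[mat_coo.col]                # string coordinate for each non-zero column
print(list(zip(d0, d1, mat_coo.data)))     # ('obs0', 'var0', 5.0) and ('obs1', 'var1', 7.0)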
442 | 17 | 4,266 |
single-cell-data__tiledb-soma
|
0a9b47f0d37d4e309d85897423bcea947086b39a
|
apis/python/src/tiledbsc/assay_matrix.py
|
Unknown
|
ctx
| true |
statement
| 17 | 17 | false | false |
[
"ctx",
"col_dim_name",
"row_dim_name",
"attr_name",
"uri",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"verbose",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse._csr.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.
|
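The last record shown here (idx 443) has util.get_sort_and_permutation from tiledbsc.util as its ground-truth completion. The function's implementation does not appear in this section, so the argsort-based stand-in below is purely an assumption about the shape of its output, for illustration only.

import numpy as np

def get_sort_and_permutation_sketch(values):
    # Hypothetical stand-in, NOT tiledbsc.util.get_sort_and_permutation:
    # return the values in sorted order plus the permutation that sorts them.
    permutation = np.argsort(values)
    return np.asarray(values)[permutation], permutation

sorted_vals, perm = get_sort_and_permutation_sketch(["c", "a", "b"])
print(sorted_vals.tolist(), perm.tolist())  # ['a', 'b', 'c'] [1, 2, 0]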
443 | 17 | 6,382 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | get_sort_and_permutation | true | function | 6 | 12 | false | true |
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"Optional",
"get_sort_and_permutation",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
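# Illustrative sketch only (not in the original source): how the integer COO indices above turn
# into string coordinates. The tiny 2x2 matrix and the obs/var names are made-up values.
#
# mat_coo = scipy.sparse.coo_matrix(np.array([[1, 0], [0, 2]]))
# row_names = np.asarray(["obs_a", "obs_b"]); col_names = np.asarray(["var_x", "var_y"])
# mat_coo.row == [0, 1]; mat_coo.col == [0, 1]; mat_coo.data == [1, 2]
# so d0 == ["obs_a", "obs_b"], d1 == ["var_x", "var_y"], and the cells written are
# (obs_a, var_x) = 1 and (obs_b, var_y) = 2.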
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#  Original     Sorted      Permutation
#  data         row names
#
#    X Y Z
#  C 0 1 2      A           1
#  A 4 0 5      B           2
#  B 7 0 0      C           0
#  D 0 8 9      D           3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
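# Illustrative sketch only (not in the original source): the worked example above expressed in
# code, assuming get_sort_and_permutation returns the sorted labels plus the permutation of
# original row indices in sorted order. The literal values are read off the table above.
#
# row_names = ['C', 'A', 'B', 'D']
# sorted_row_names, permutation = util.get_sort_and_permutation(row_names)
# sorted_row_names == ['A', 'B', 'C', 'D']; permutation == [1, 2, 0, 3]
# First chunk (sorted rows 0..1) reads original CSR rows permutation[0:2] == [1, 2] ('A', 'B');
# second chunk (sorted rows 2..3) reads permutation[2:4] == [0, 3] ('C', 'D').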
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed
# as csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.
|
444 | 17 | 6,605 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | inproject | get_start_stamp | true | function | 6 | 12 | false | false |
[
"get_start_stamp",
"format_elapsed",
"find_csr_chunk_size",
"get_sort_and_permutation",
"Optional",
"_to_tiledb_supported_array_type"
] |
[
{
"name": "ad",
"type": "module"
},
{
"name": "find_csr_chunk_size",
"type": "function"
},
{
"name": "format_elapsed",
"type": "function"
},
{
"name": "get_sort_and_permutation",
"type": "function"
},
{
"name": "get_start_stamp",
"type": "function"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "Optional",
"type": "class"
},
{
"name": "pd",
"type": "module"
},
{
"name": "scipy",
"type": "module"
},
{
"name": "tiledb",
"type": "module"
},
{
"name": "time",
"type": "module"
},
{
"name": "_to_tiledb_supported_array_type",
"type": "function"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#  Original     Sorted      Permutation
#  data         row names
#
#    X Y Z
#  C 0 1 2      A           1
#  A 4 0 5      B           2
#  B 7 0 0      C           0
#  D 0 8 9      D           3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
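# Illustrative sketch only (not part of the original source): one plausible shape of the chunked
# write loop described above, assuming a fixed chunk height chunk_rows; the real implementation
# below (truncated here) may size chunks differently, e.g. via util.find_csr_chunk_size.
#
# i = 0
# while i < matrix.shape[0]:
#     i2 = min(i + chunk_rows, matrix.shape[0])
#     chunk_coo = matrix[permutation[i:i2]].tocoo()
#     d0 = sorted_row_names[chunk_coo.row + i]   # chunk_coo.row restarts at 0 for each chunk
#     d1 = col_names[chunk_coo.col]
#     with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
#         A[d0, d1] = chunk_coo.data
#     i = i2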
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed
# as csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.
|
445 | 17 | 6,639 | single-cell-data__tiledb-soma | 0a9b47f0d37d4e309d85897423bcea947086b39a | apis/python/src/tiledbsc/assay_matrix.py | Unknown | verbose | true | statement | 17 | 17 | false | false |
[
"verbose",
"row_dim_name",
"attr_name",
"col_dim_name",
"ingest_data",
"__init__",
"create_empty_array",
"from_matrix",
"ingest_data_rows_chunked",
"ingest_data_whole",
"to_csr_matrix",
"ctx",
"exists",
"indent",
"name",
"object_type",
"soma_options",
"uri",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "attr_name",
"type": "statement"
},
{
"name": "col_dim_name",
"type": "statement"
},
{
"name": "create_empty_array",
"type": "function"
},
{
"name": "ctx",
"type": "statement"
},
{
"name": "exists",
"type": "function"
},
{
"name": "from_matrix",
"type": "function"
},
{
"name": "indent",
"type": "statement"
},
{
"name": "ingest_data",
"type": "function"
},
{
"name": "ingest_data_rows_chunked",
"type": "function"
},
{
"name": "ingest_data_whole",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "object_type",
"type": "function"
},
{
"name": "row_dim_name",
"type": "statement"
},
{
"name": "soma_options",
"type": "statement"
},
{
"name": "to_csr_matrix",
"type": "function"
},
{
"name": "uri",
"type": "statement"
},
{
"name": "verbose",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import tiledb
from .soma_options import SOMAOptions
from .tiledb_array import TileDBArray
from .tiledb_group import TileDBGroup
from .tiledb_object import TileDBObject
import tiledbsc.util as util
import scipy
import numpy as np
from typing import Optional
class AssayMatrix(TileDBArray):
"""
Wraps a TileDB sparse array with two string dimensions.
Used for X, obsp members, and varp members.
"""
row_dim_name: str # obs_id for X, obs_id_i for obsp; var_id_i for varp
col_dim_name: str # var_id for X, obs_id_j for obsp; var_id_j for varp
attr_name: str
# ----------------------------------------------------------------
def __init__(
self,
uri: str,
name: str,
row_dim_name: str,
col_dim_name: str,
parent: Optional[TileDBGroup] = None,
):
"""
See the TileDBObject constructor.
"""
super().__init__(uri=uri, name=name, parent=parent)
self.row_dim_name = row_dim_name
self.col_dim_name = col_dim_name
self.attr_name = 'value'
# ----------------------------------------------------------------
def from_matrix(self, matrix, row_names, col_names) -> None:
"""
Imports a matrix -- nominally scipy.sparse.csr_matrix or numpy.ndarray -- into a TileDB
array which is used for X, obsp members, and varp members.
"""
if self.verbose:
s = util.get_start_stamp()
print(f"{self.indent}START WRITING {self.uri}")
if self.exists():
if self.verbose:
print(f"{self.indent}Re-using existing array {self.uri}")
else:
self.create_empty_array(matrix_dtype=matrix.dtype)
self.ingest_data(matrix, row_names, col_names)
if self.verbose:
print(util.format_elapsed(s, f"{self.indent}FINISH WRITING {self.uri}"))
# ----------------------------------------------------------------
def create_empty_array(self, matrix_dtype: np.dtype) -> None:
"""
Create a TileDB 2D sparse array with string dimensions and a single attribute.
"""
level = self.soma_options.string_dim_zstd_level
dom = tiledb.Domain(
tiledb.Dim(name=self.row_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.RleFilter()]),
tiledb.Dim(name=self.col_dim_name, domain=(None, None), dtype="ascii", filters=[tiledb.ZstdFilter(level=level)]),
ctx=self.ctx
)
att = tiledb.Attr(self.attr_name, dtype=matrix_dtype, filters=[tiledb.ZstdFilter()], ctx=self.ctx)
sch = tiledb.ArraySchema(
domain=dom,
attrs=(att,),
sparse=True,
allows_duplicates=True,
offsets_filters=[tiledb.DoubleDeltaFilter(), tiledb.BitWidthReductionFilter(), tiledb.ZstdFilter()],
capacity=self.soma_options.X_capacity,
cell_order=self.soma_options.X_cell_order,
tile_order=self.soma_options.X_tile_order,
ctx=self.ctx
)
tiledb.Array.create(self.uri, sch, ctx=self.ctx)
# ----------------------------------------------------------------
def ingest_data(self, matrix, row_names, col_names) -> None:
# TODO: add chunked support for CSC
if isinstance(matrix, scipy.sparse.csr_matrix) and self.soma_options.write_X_chunked_if_csr:
self.ingest_data_rows_chunked(matrix, row_names, col_names)
else:
self.ingest_data_whole(matrix, row_names, col_names)
# ----------------------------------------------------------------
def ingest_data_whole(self, matrix, row_names, col_names) -> None:
"""
Convert ndarray/(csr|csc)matrix to coo_matrix and ingest into TileDB.
:param matrix: Matrix-like object coercible to a scipy coo_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
mat_coo = scipy.sparse.coo_matrix(matrix)
d0 = row_names[mat_coo.row]
d1 = col_names[mat_coo.col]
with tiledb.open(self.uri, mode="w", ctx=self.ctx) as A:
A[d0, d1] = mat_coo.data
# ----------------------------------------------------------------
# Example: suppose this 4x3 is to be written in two chunks of two rows each
# but written in sorted order.
#
#  Original     Sorted      Permutation
#  data         row names
#
#    X Y Z
#  C 0 1 2      A           1
#  A 4 0 5      B           2
#  B 7 0 0      C           0
#  D 0 8 9      D           3
#
# First chunk:
# * Row indices 0,1 map to permutation indices 1,2
# * i,i2 are 0,2
# * chunk_coo is original matrix rows 1,2
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [0,1]
# * sorted_row_names: ['A', 'B']
#
# Second chunk:
# * Row indices 2,3 map to permutation indices 0,3
# * i,i2 are 2,4
# * chunk_coo is original matrix rows 0,3
# * chunk_coo.row is [0,1]
# * chunk_coo.row + i is [2,3]
# * sorted_row_names: ['C', 'D']
#
# See README-csr-ingest.md for important information on using this ingestor.
# ----------------------------------------------------------------
def ingest_data_rows_chunked(self, matrix, row_names, col_names) -> None:
"""
Convert csr_matrix to coo_matrix chunkwise and ingest into TileDB.
:param matrix: csr_matrix.
:param row_names: List of row names.
:param col_names: List of column names.
"""
assert len(row_names) == matrix.shape[0]
assert len(col_names) == matrix.shape[1]
# Sort the row names so we can write chunks indexed by sorted string keys. This will lead
# to efficient TileDB fragments in the sparse array indexed by these string keys.
#
# Key note: only the _obs labels_ are being sorted, and along with them come permutation
# indices for accessing the CSR matrix via cursor-indirection -- e.g. csr[28] is accessed
# as csr[permutation[28]] -- the CSR matrix itself isn't sorted in bulk.
sorted_row_names, permutation = util.get_sort_and_permutation(list(row_names))
# Using numpy we can index this with a list of indices, which a plain Python list doesn't support.
sorted_row_names = np.asarray(sorted_row_names)
s = util.get_start_stamp()
if self.
|