idx (int64, 0–7.85k) | idx_lca (int64, 0–223) | offset (int64, 162–55k) | repo (string, 62 values) | commit_hash (string, 113 values) | target_file (string, 134 values) | line_type_lca (string, 7 classes) | ground_truth (string, 1–46 chars) | in_completions (bool, 1 class) | completion_type (string, 6 classes) | non_dunder_count_intellij (int64, 0–529) | non_dunder_count_jedi (int64, 0–128) | start_with_ (bool, 2 classes) | first_occurrence (bool, 2 classes) | intellij_completions (list, 1–532 items) | jedi_completions (list, 3–148 items) | prefix (string, 162–55k chars) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
137 | 6 | 2,207 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | Unknown | offspring | true | statement | 16 | 17 | false | false |
[
"population",
"toolbox",
"n_pop",
"children",
"offspring",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.
|
139 | 6 | 2,283 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | inproject | n_pop | true | statement | 15 | 17 | false | false |
[
"population",
"toolbox",
"offspring",
"n_pop",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.
|
140 | 6 | 2,295 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | inproject | vocs | true | property | 15 | 17 | false | false |
[
"vocs",
"population",
"toolbox",
"offspring",
"n_pop",
"__init__",
"_vocs",
"children",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.
|
141 | 6 | 2,306 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | inproject | toolbox | true | statement | 15 | 17 | false | false |
[
"toolbox",
"population",
"offspring",
"n_pop",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.
|
144 | 6 | 2,635 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | Unknown | children | true | statement | 15 | 17 | false | true |
[
"n_pop",
"offspring",
"population",
"toolbox",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.
|
145 | 6 | 2,678 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | infile | children | true | statement | 15 | 17 | false | false |
[
"n_pop",
"toolbox",
"offspring",
"population",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.
|
146 | 6 | 2,699 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | infile | create_children | true | function | 15 | 17 | false | true |
[
"children",
"n_pop",
"offspring",
"population",
"toolbox",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.children.extend(self.
|
147 | 6 | 2,749 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | Unknown | children | true | statement | 15 | 17 | false | false |
[
"n_pop",
"toolbox",
"offspring",
"population",
"children",
"__init__",
"_vocs",
"create_children",
"crossover_probability",
"generate",
"mutation_probability",
"update_data",
"data",
"is_done",
"options",
"vocs",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "children",
"type": "statement"
},
{
"name": "create_children",
"type": "function"
},
{
"name": "crossover_probability",
"type": "statement"
},
{
"name": "data",
"type": "property"
},
{
"name": "generate",
"type": "function"
},
{
"name": "is_done",
"type": "property"
},
{
"name": "mutation_probability",
"type": "statement"
},
{
"name": "n_pop",
"type": "statement"
},
{
"name": "offspring",
"type": "statement"
},
{
"name": "options",
"type": "statement"
},
{
"name": "population",
"type": "statement"
},
{
"name": "toolbox",
"type": "statement"
},
{
"name": "update_data",
"type": "function"
},
{
"name": "vocs",
"type": "property"
},
{
"name": "_data",
"type": "statement"
},
{
"name": "_is_done",
"type": "statement"
},
{
"name": "_vocs",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.children.extend(self.create_children())
        return [self.
|
149 | 6 | 4,085 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | true |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.children.extend(self.create_children())
        return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
    """
    """
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
    """
    Creates a DEAP toolbox from VOCS dict for use with cnsga.
    Selection options:
        nsga2: Standard NSGA2 [Deb2002] selection
        nsga3: NSGA3 [Deb2014] selection
        spea2: SPEA-II [Zitzler2001] selection
        auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
    See DEAP code for details.
    """
    var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
    n_var = len(var)
    n_obj = len(obj)
    n_con = len(con)
    var_labels = vocs.variable_names
    obj_labels = vocs.objective_names
    bound_low, bound_up = vocs.bounds
    # DEAP does not like arrays, needs tuples.
    bound_low = tuple(bound_low)
    bound_up = tuple(bound_up)
    # creator should assign already weighted values (for minimization)
    weights = tuple([-1]*n_obj)
    # Create MyFitness
    if 'MyFitness' in dir(deap_creator):
        del deap_creator.MyFitness
    if n_con == 0:
        # Normal Fitness class
        deap_creator.
|
150 | 6 | 4,226 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | false |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.children.extend(self.create_children())
        return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
    """
    """
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
    """
    Creates a DEAP toolbox from VOCS dict for use with cnsga.
    Selection options:
        nsga2: Standard NSGA2 [Deb2002] selection
        nsga3: NSGA3 [Deb2014] selection
        spea2: SPEA-II [Zitzler2001] selection
        auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
    See DEAP code for details.
    """
    var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
    n_var = len(var)
    n_obj = len(obj)
    n_con = len(con)
    var_labels = vocs.variable_names
    obj_labels = vocs.objective_names
    bound_low, bound_up = vocs.bounds
    # DEAP does not like arrays, needs tuples.
    bound_low = tuple(bound_low)
    bound_up = tuple(bound_up)
    # creator should assign already weighted values (for minimization)
    weights = tuple([-1]*n_obj)
    # Create MyFitness
    if 'MyFitness' in dir(deap_creator):
        del deap_creator.MyFitness
    if n_con == 0:
        # Normal Fitness class
        deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
    else:
        # Fitness with Constraints
        deap_creator.
|
152 | 6 | 4,500 | christophermayes__xopt | 683fd0c3af2f0fc12a598932b20e3afe8070112b | xopt/generators/ga/cnsga.py | common | create | true | function | 6 | 11 | false | false |
[
"create",
"MetaCreator",
"meta_create",
"class_replacers",
"_array",
"_numpy_array"
] |
[
{
"name": "array",
"type": "module"
},
{
"name": "class_replacers",
"type": "statement"
},
{
"name": "copy",
"type": "module"
},
{
"name": "copyreg",
"type": "module"
},
{
"name": "create",
"type": "function"
},
{
"name": "meta_create",
"type": "function"
},
{
"name": "MetaCreator",
"type": "class"
},
{
"name": "numpy",
"type": "module"
},
{
"name": "warnings",
"type": "module"
},
{
"name": "_array",
"type": "class"
},
{
"name": "_numpy_array",
"type": "class"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
from xopt.generators.ga import deap_creator
from xopt.generators.ga.deap_fitness_with_constraints import FitnessWithConstraints
from xopt import Generator
from deap import base as deap_base
from deap import tools as deap_tools
from deap import algorithms as deap_algorithms
import pandas as pd
import random
import array
from typing import List, Dict
import logging
logger = logging.getLogger(__name__)
from typing import List, Dict
class CNSGAGenerator(Generator):
    def __init__(self, vocs, *,
                 n_pop,
                 data = None,
                 crossover_probability = 0.9,
                 mutation_probability = 1.0
                 ):
        self._vocs = vocs # TODO: use proper options
        self.n_pop = n_pop
        self.crossover_probability = crossover_probability
        self.mutation_probability = mutation_probability
        # Internal data structures
        self.children = [] # unevaluated inputs. This should be a list of dicts.
        self.population = None # The latest population (fully evaluated)
        self.offspring = None # Newly evaluated data, but not yet added to population
        # DEAP toolbox (internal)
        self.toolbox = cnsga_toolbox(vocs, selection='auto')
        if data is not None:
            self.population = cnsga_select(data, n_pop, vocs, self.toolbox)
    def create_children(self):
        # No population, so create random children
        if self.population is None:
            return [self.vocs.random_inputs() for _ in range(self.n_pop)]
        # Use population to create children
        inputs = cnsga_variation(self.population, self.vocs, self.toolbox,
                                 crossover_probability=self.crossover_probability, mutation_probability=self.mutation_probability)
        return inputs.to_dict(orient='records')
    def update_data(self, new_data: pd.DataFrame):
        self.offspring = pd.concat([self.offspring, new_data])
        # Next generation
        if len(self.offspring) >= self.n_pop:
            if self.population is None:
                self.population = self.offspring.iloc[:self.n_pop]
                self.offspring = self.offspring.iloc[self.n_pop:]
            else:
                candidates = pd.concat([self.population, self.offspring])
                self.population = cnsga_select(candidates, self.n_pop, self.vocs, self.toolbox)
                self.children = [] # reset children
                self.offspring = None # reset offspring
    def generate(self, n_candidates) -> List[Dict]:
        """
        generate `n_candidates` candidates
        """
        # Make sure we have enough children to fulfill the request
        while len(self.children) < n_candidates:
            self.children.extend(self.create_children())
        return [self.children.pop() for _ in range(n_candidates)]
def uniform(low, up, size=None):
    """
    """
    try:
        return [random.uniform(a, b) for a, b in zip(low, up)]
    except TypeError:
        return [random.uniform(a, b) for a, b in zip([low] * size, [up] * size)]
def cnsga_toolbox(vocs, selection='auto'):
    """
    Creates a DEAP toolbox from VOCS dict for use with cnsga.
    Selection options:
        nsga2: Standard NSGA2 [Deb2002] selection
        nsga3: NSGA3 [Deb2014] selection
        spea2: SPEA-II [Zitzler2001] selection
        auto: will choose nsga2 for <= 2 objectives, otherwise nsga3
    See DEAP code for details.
    """
    var, obj, con = vocs.variables, vocs.objectives, vocs.constraints
    n_var = len(var)
    n_obj = len(obj)
    n_con = len(con)
    var_labels = vocs.variable_names
    obj_labels = vocs.objective_names
    bound_low, bound_up = vocs.bounds
    # DEAP does not like arrays, needs tuples.
    bound_low = tuple(bound_low)
    bound_up = tuple(bound_up)
    # creator should assign already weighted values (for minimization)
    weights = tuple([-1]*n_obj)
    # Create MyFitness
    if 'MyFitness' in dir(deap_creator):
        del deap_creator.MyFitness
    if n_con == 0:
        # Normal Fitness class
        deap_creator.create('MyFitness', deap_base.Fitness, weights=weights, labels=obj_labels)
    else:
        # Fitness with Constraints
        deap_creator.create('MyFitness', FitnessWithConstraints,
                            weights=weights, n_constraints=n_con, labels=obj_labels)
    # Create Individual. Check if exists first.
    if 'Individual' in dir(deap_creator):
        del deap_creator.Individual
    deap_creator.
|
153 | 7 | 1,356 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_ZARR_STORE | true | statement | 14 | 14 | false | true |
[
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_ZARR_STORE",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_not_implemented_error",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
    # GIVEN
    ds = xr.Dataset(
        {
            "pouet": xr.DataArray(
                data=np.full(10, 42).reshape((10, 1, 1)),
                coords=dict(
                    latitude=[42],
                    longitude=[42],
                    t=pd.date_range("2042-01-01", periods=10, freq="D"),
                ),
                dims=["t", "latitude", "longitude"],
                name="pr",
                attrs={"units": "kg m-2 d-1"},
            )
        }
    )
    # WHEN
    res, revert = update_to_standard_coords(ds)
    # THEN
    assert "lat" in res.coords
    assert "time" in res.coords
    assert "lon" in res.coords
    assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
    OUTPUT_NC_FILE = "tmp.nc"
    OUTPUT_NC_FILE_2 = "tmp-2.nc"
    OUTPUT_ZARR_STORE = "tmp.zarr"
    OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
    @pytest.fixture(autouse=True)
    def cleanup(self):
        # setup
        yield
        # teardown
        shutil.rmtree(self.
|
154 | 7 | 1,431 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_NC_FILE | true | statement | 15 | 14 | false | true |
[
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_ZARR_STORE",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_not_implemented_error",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.
|
155 | 7 | 1,464 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_NC_FILE_2 | true | statement | 15 | 14 | false | true |
[
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_not_implemented_error",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.
|
156 | 7 | 1,499 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_UNKNOWN_FORMAT | true | statement | 15 | 14 | false | true |
[
"OUTPUT_NC_FILE_2",
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"cleanup",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.
|
157 | 7 | 2,688 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | WW | true | instance | 53 | 62 | false | true |
[
"TX90P",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"WW",
"mro",
"value",
"__init__",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__bool__",
"__call__",
"__contains__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__iter__",
"__len__",
"__members__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__or__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__subclasshook__",
"__text_signature__",
"__weakrefoffset__",
"__class__",
"__doc__",
"__module__"
] |
[
{
"name": "CD",
"type": "instance"
},
{
"name": "CDD",
"type": "instance"
},
{
"name": "CFD",
"type": "instance"
},
{
"name": "CSDI",
"type": "instance"
},
{
"name": "CSU",
"type": "instance"
},
{
"name": "CW",
"type": "instance"
},
{
"name": "CWD",
"type": "instance"
},
{
"name": "DTR",
"type": "instance"
},
{
"name": "ETR",
"type": "instance"
},
{
"name": "FD",
"type": "instance"
},
{
"name": "GD4",
"type": "instance"
},
{
"name": "HD17",
"type": "instance"
},
{
"name": "ID",
"type": "instance"
},
{
"name": "lookup",
"type": "function"
},
{
"name": "mro",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "PRCPTOT",
"type": "instance"
},
{
"name": "R10MM",
"type": "instance"
},
{
"name": "R20MM",
"type": "instance"
},
{
"name": "R75P",
"type": "instance"
},
{
"name": "R75PTOT",
"type": "instance"
},
{
"name": "R95P",
"type": "instance"
},
{
"name": "R95PTOT",
"type": "instance"
},
{
"name": "R99P",
"type": "instance"
},
{
"name": "R99PTOT",
"type": "instance"
},
{
"name": "RR1",
"type": "instance"
},
{
"name": "RX1DAY",
"type": "instance"
},
{
"name": "RX5DAY",
"type": "instance"
},
{
"name": "SD",
"type": "instance"
},
{
"name": "SD1",
"type": "instance"
},
{
"name": "SD50CM",
"type": "instance"
},
{
"name": "SD5CM",
"type": "instance"
},
{
"name": "SDII",
"type": "instance"
},
{
"name": "SU",
"type": "instance"
},
{
"name": "TG",
"type": "instance"
},
{
"name": "TG10P",
"type": "instance"
},
{
"name": "TG90P",
"type": "instance"
},
{
"name": "TN",
"type": "instance"
},
{
"name": "TN10P",
"type": "instance"
},
{
"name": "TN90P",
"type": "instance"
},
{
"name": "TNN",
"type": "instance"
},
{
"name": "TNX",
"type": "instance"
},
{
"name": "TR",
"type": "instance"
},
{
"name": "TX",
"type": "instance"
},
{
"name": "TX10P",
"type": "instance"
},
{
"name": "TX90P",
"type": "instance"
},
{
"name": "TXN",
"type": "instance"
},
{
"name": "TXX",
"type": "instance"
},
{
"name": "value",
"type": "statement"
},
{
"name": "VDTR",
"type": "instance"
},
{
"name": "WD",
"type": "instance"
},
{
"name": "WSDI",
"type": "instance"
},
{
"name": "WW",
"type": "instance"
},
{
"name": "_generate_next_value_",
"type": "function"
},
{
"name": "_ignore_",
"type": "statement"
},
{
"name": "_member_map_",
"type": "statement"
},
{
"name": "_member_names_",
"type": "statement"
},
{
"name": "_missing_",
"type": "function"
},
{
"name": "_name_",
"type": "statement"
},
{
"name": "_order_",
"type": "statement"
},
{
"name": "_value2member_map_",
"type": "statement"
},
{
"name": "_value_",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__base__",
"type": "statement"
},
{
"name": "__bases__",
"type": "statement"
},
{
"name": "__basicsize__",
"type": "statement"
},
{
"name": "__call__",
"type": "function"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dictoffset__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__flags__",
"type": "statement"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__instancecheck__",
"type": "function"
},
{
"name": "__itemsize__",
"type": "statement"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__mro__",
"type": "statement"
},
{
"name": "__name__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__order__",
"type": "statement"
},
{
"name": "__prepare__",
"type": "function"
},
{
"name": "__qualname__",
"type": "statement"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasscheck__",
"type": "function"
},
{
"name": "__subclasses__",
"type": "function"
},
{
"name": "__text_signature__",
"type": "statement"
},
{
"name": "__weakrefoffset__",
"type": "statement"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.
|
158 | 7 | 3,183 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | TX90P | true | instance | 53 | 62 | false | true |
[
"WW",
"name",
"CD",
"CW",
"SD",
"CDD",
"CFD",
"CSDI",
"CSU",
"CWD",
"DTR",
"ETR",
"FD",
"GD4",
"HD17",
"ID",
"lookup",
"PRCPTOT",
"R10MM",
"R20MM",
"R75P",
"R75PTOT",
"R95P",
"R95PTOT",
"R99P",
"R99PTOT",
"RR1",
"RX1DAY",
"RX5DAY",
"SD1",
"SD5CM",
"SD50CM",
"SDII",
"SU",
"TG",
"TG10P",
"TG90P",
"TN",
"TN10P",
"TN90P",
"TNN",
"TNX",
"TR",
"TX",
"TX10P",
"TX90P",
"TXN",
"TXX",
"VDTR",
"WD",
"WSDI",
"mro",
"value",
"__init__",
"__annotations__",
"__base__",
"__bases__",
"__basicsize__",
"__bool__",
"__call__",
"__contains__",
"__delattr__",
"__dict__",
"__dictoffset__",
"__dir__",
"__eq__",
"__flags__",
"__format__",
"__getattribute__",
"__getitem__",
"__hash__",
"__init_subclass__",
"__instancecheck__",
"__itemsize__",
"__iter__",
"__len__",
"__members__",
"__mro__",
"__name__",
"__ne__",
"__new__",
"__or__",
"__order__",
"__prepare__",
"__qualname__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__reversed__",
"__ror__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasscheck__",
"__subclasses__",
"__subclasshook__",
"__text_signature__",
"__weakrefoffset__",
"__class__",
"__doc__",
"__module__"
] |
[
{
"name": "CD",
"type": "instance"
},
{
"name": "CDD",
"type": "instance"
},
{
"name": "CFD",
"type": "instance"
},
{
"name": "CSDI",
"type": "instance"
},
{
"name": "CSU",
"type": "instance"
},
{
"name": "CW",
"type": "instance"
},
{
"name": "CWD",
"type": "instance"
},
{
"name": "DTR",
"type": "instance"
},
{
"name": "ETR",
"type": "instance"
},
{
"name": "FD",
"type": "instance"
},
{
"name": "GD4",
"type": "instance"
},
{
"name": "HD17",
"type": "instance"
},
{
"name": "ID",
"type": "instance"
},
{
"name": "lookup",
"type": "function"
},
{
"name": "mro",
"type": "function"
},
{
"name": "name",
"type": "statement"
},
{
"name": "PRCPTOT",
"type": "instance"
},
{
"name": "R10MM",
"type": "instance"
},
{
"name": "R20MM",
"type": "instance"
},
{
"name": "R75P",
"type": "instance"
},
{
"name": "R75PTOT",
"type": "instance"
},
{
"name": "R95P",
"type": "instance"
},
{
"name": "R95PTOT",
"type": "instance"
},
{
"name": "R99P",
"type": "instance"
},
{
"name": "R99PTOT",
"type": "instance"
},
{
"name": "RR1",
"type": "instance"
},
{
"name": "RX1DAY",
"type": "instance"
},
{
"name": "RX5DAY",
"type": "instance"
},
{
"name": "SD",
"type": "instance"
},
{
"name": "SD1",
"type": "instance"
},
{
"name": "SD50CM",
"type": "instance"
},
{
"name": "SD5CM",
"type": "instance"
},
{
"name": "SDII",
"type": "instance"
},
{
"name": "SU",
"type": "instance"
},
{
"name": "TG",
"type": "instance"
},
{
"name": "TG10P",
"type": "instance"
},
{
"name": "TG90P",
"type": "instance"
},
{
"name": "TN",
"type": "instance"
},
{
"name": "TN10P",
"type": "instance"
},
{
"name": "TN90P",
"type": "instance"
},
{
"name": "TNN",
"type": "instance"
},
{
"name": "TNX",
"type": "instance"
},
{
"name": "TR",
"type": "instance"
},
{
"name": "TX",
"type": "instance"
},
{
"name": "TX10P",
"type": "instance"
},
{
"name": "TX90P",
"type": "instance"
},
{
"name": "TXN",
"type": "instance"
},
{
"name": "TXX",
"type": "instance"
},
{
"name": "value",
"type": "statement"
},
{
"name": "VDTR",
"type": "instance"
},
{
"name": "WD",
"type": "instance"
},
{
"name": "WSDI",
"type": "instance"
},
{
"name": "WW",
"type": "instance"
},
{
"name": "_generate_next_value_",
"type": "function"
},
{
"name": "_ignore_",
"type": "statement"
},
{
"name": "_member_map_",
"type": "statement"
},
{
"name": "_member_names_",
"type": "statement"
},
{
"name": "_missing_",
"type": "function"
},
{
"name": "_name_",
"type": "statement"
},
{
"name": "_order_",
"type": "statement"
},
{
"name": "_value2member_map_",
"type": "statement"
},
{
"name": "_value_",
"type": "statement"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__base__",
"type": "statement"
},
{
"name": "__bases__",
"type": "statement"
},
{
"name": "__basicsize__",
"type": "statement"
},
{
"name": "__call__",
"type": "function"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dictoffset__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__flags__",
"type": "statement"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__instancecheck__",
"type": "function"
},
{
"name": "__itemsize__",
"type": "statement"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__mro__",
"type": "statement"
},
{
"name": "__name__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__order__",
"type": "statement"
},
{
"name": "__prepare__",
"type": "function"
},
{
"name": "__qualname__",
"type": "statement"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
},
{
"name": "__subclasscheck__",
"type": "function"
},
{
"name": "__subclasses__",
"type": "function"
},
{
"name": "__text_signature__",
"type": "statement"
},
{
"name": "__weakrefoffset__",
"type": "statement"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.
|
159 | 7 | 5,173 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_NC_FILE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_xr_ds_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.
|
160 | 7 | 5,234 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.
|
161 | 7 | 5,952 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_NC_FILE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_netcdf_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.
|
162 | 7 | 6,025 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_NC_FILE_2 | true | statement | 14 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_netcdf_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_xr_ds_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.
|
163 | 7 | 6,104 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE | true | statement | 15 | 14 | false | false |
[
"OUTPUT_NC_FILE",
"test_read_dataset_xr_ds_success",
"OUTPUT_NC_FILE_2",
"OUTPUT_ZARR_STORE",
"test_read_dataset_netcdf_success",
"cleanup",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.
| 164 | 7 | 6,125 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_NC_FILE_2 | true | statement | 15 | 14 | false | false |
[
"OUTPUT_NC_FILE_2",
"OUTPUT_NC_FILE",
"OUTPUT_UNKNOWN_FORMAT",
"OUTPUT_ZARR_STORE",
"test_read_dataset_xr_ds_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"for",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.
| 165 | 7 | 6,917 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | Unknown | OUTPUT_ZARR_STORE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"OUTPUT_UNKNOWN_FORMAT",
"test_read_dataset_xr_ds_success",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.
| 166 | 7 | 6,996 | cerfacs-globc__icclim | 7d571ec4d1e1b1fcc1433bb178de2bc0f2f2f0b7 | icclim/tests/unit_tests/test_input_parsing.py | inproject | OUTPUT_ZARR_STORE | true | statement | 14 | 14 | false | false |
[
"OUTPUT_ZARR_STORE",
"OUTPUT_NC_FILE",
"OUTPUT_NC_FILE_2",
"test_read_dataset_xr_ds_success",
"OUTPUT_UNKNOWN_FORMAT",
"cleanup",
"test_read_dataset_multi_netcdf_success",
"test_read_dataset_netcdf_success",
"test_read_dataset_not_implemented_error",
"test_read_dataset_xr_da_ecad_index_error",
"test_read_dataset_xr_da_ecad_index_success",
"test_read_dataset_xr_da_user_index_error",
"test_read_dataset_xr_da_user_index_success",
"test_read_dataset_zarr_store_success",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "cleanup",
"type": "function"
},
{
"name": "OUTPUT_NC_FILE",
"type": "statement"
},
{
"name": "OUTPUT_NC_FILE_2",
"type": "statement"
},
{
"name": "OUTPUT_UNKNOWN_FORMAT",
"type": "statement"
},
{
"name": "OUTPUT_ZARR_STORE",
"type": "statement"
},
{
"name": "test_read_dataset_multi_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_netcdf_success",
"type": "function"
},
{
"name": "test_read_dataset_not_implemented_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_ecad_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_error",
"type": "function"
},
{
"name": "test_read_dataset_xr_da_user_index_success",
"type": "function"
},
{
"name": "test_read_dataset_xr_ds_success",
"type": "function"
},
{
"name": "test_read_dataset_zarr_store_success",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
import os
import shutil
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from icclim.icclim_exceptions import InvalidIcclimArgumentError
from icclim.models.ecad_indices import EcadIndex
from icclim.pre_processing.input_parsing import read_dataset, update_to_standard_coords
def test_update_to_standard_coords():
# GIVEN
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
# WHEN
res, revert = update_to_standard_coords(ds)
# THEN
assert "lat" in res.coords
assert "time" in res.coords
assert "lon" in res.coords
assert res.rename(revert).coords.keys() == ds.coords.keys()
class Test_ReadDataset:
OUTPUT_NC_FILE = "tmp.nc"
OUTPUT_NC_FILE_2 = "tmp-2.nc"
OUTPUT_ZARR_STORE = "tmp.zarr"
OUTPUT_UNKNOWN_FORMAT = "tmp.cacahuete"
@pytest.fixture(autouse=True)
def cleanup(self):
# setup
yield
# teardown
shutil.rmtree(self.OUTPUT_ZARR_STORE, ignore_errors=True)
for f in [
self.OUTPUT_NC_FILE,
self.OUTPUT_NC_FILE_2,
self.OUTPUT_UNKNOWN_FORMAT,
]:
try:
os.remove(f)
except FileNotFoundError:
pass
def test_read_dataset_xr_da_user_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da)
def test_read_dataset_xr_da_ecad_index_error(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
with pytest.raises(InvalidIcclimArgumentError):
read_dataset(da, EcadIndex.WW)
def test_read_dataset_xr_da_ecad_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, EcadIndex.TX90P)
xr.testing.assert_equal(ds_res.tasmax, da)
assert chunk_it is False
def test_read_dataset_xr_da_user_index_success(self):
da = xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
ds_res, chunk_it = read_dataset(da, None, "doto")
xr.testing.assert_equal(ds_res.doto, da)
assert chunk_it is False
def test_read_dataset_xr_ds_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds_res, chunk_it = read_dataset(ds)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is False
def test_read_dataset_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds_res, chunk_it = read_dataset(self.OUTPUT_NC_FILE)
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_multi_netcdf_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_netcdf(self.OUTPUT_NC_FILE)
ds.rename({"pouet": "patapouet"}).to_netcdf(self.OUTPUT_NC_FILE_2)
# WHEN
ds_res, chunk_it = read_dataset([self.OUTPUT_NC_FILE, self.OUTPUT_NC_FILE_2])
# THEN
xr.testing.assert_equal(ds_res.pouet, ds.pouet)
xr.testing.assert_equal(ds_res.patapouet, ds.pouet)
assert chunk_it is True
def test_read_dataset_zarr_store_success(self):
ds = xr.Dataset(
{
"pouet": xr.DataArray(
data=np.full(10, 42).reshape((10, 1, 1)),
coords=dict(
latitude=[42],
longitude=[42],
t=pd.date_range("2042-01-01", periods=10, freq="D"),
),
dims=["t", "latitude", "longitude"],
name="pr",
attrs={"units": "kg m-2 d-1"},
)
}
)
ds.to_zarr(self.OUTPUT_ZARR_STORE)
# WHEN
ds_res, chunk_it = read_dataset(self.
| 167 | 12 | 1,208 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_URL | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.
| 168 | 12 | 1,259 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_URL | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.
| 169 | 12 | 1,408 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_DRIVER | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.
| 170 | 12 | 1,462 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_DRIVER | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.
| 171 | 12 | 1,622 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_TABLE | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.
|
172 | 12 | 1,675 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_TABLE | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.
|
173 | 12 | 1,833 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_PARTITIONCOLUMN | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.
|
174 | 12 | 1,896 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_PARTITIONCOLUMN | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.
|
175 | 12 | 2,106 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_LOWERBOUND | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.
|
176 | 12 | 2,164 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_LOWERBOUND | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.
|
177 | 12 | 2,421 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_UPPERBOUND | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.
|
178 | 12 | 2,479 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_UPPERBOUND | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.
|
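Note: row 178's prefix breaks off at `dest=constants.`, and its ground_truth column records the expected completion, JDBCTOJDBC_INPUT_UPPERBOUND. The finished call is not a guess here — it appears verbatim in the prefixes of later rows (e.g. row 180) — so a minimal reconstruction of the truncated argument reads:

# Completion of row 178's truncated parse_args chain; constant name taken from
# the row's ground_truth column, remaining lines copied from row 180's prefix.
parser.add_argument(
    f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
    dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
    required=False,
    default="",
    help='JDBC input table partition column upper bound which is used to decide the partition stride'
)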
179 | 12 | 2,736 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_NUMPARTITIONS | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.
|
180 | 12 | 2,791 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_NUMPARTITIONS | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.
|
181 | 12 | 3,068 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | random | JDBCTOJDBC_OUTPUT_URL | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.
|
182 | 12 | 3,120 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_URL | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.
|
183 | 12 | 3,271 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_DRIVER | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.
|
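Note: rows 183 and 184 both truncate before the output-driver argument, and their ground_truth column gives JDBCTOJDBC_OUTPUT_DRIVER. The continuation below is a sketch only: the constant name comes from the ground_truth field, while the required flag and help wording are assumptions mirroring the JDBCTOJDBC_INPUT_DRIVER argument earlier in the same prefix.

# Hypothetical continuation of rows 183/184; only the constant name is
# confirmed by the dataset, the rest mirrors the input-driver argument.
parser.add_argument(
    f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
    dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
    required=True,  # assumed, by analogy with the input driver above
    help='JDBC output driver name'  # assumed wording, not present in the dump
)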
184 | 12 | 3,326 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_DRIVER | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.
|
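For readers scanning these rows: every prefix above follows the same constants-driven argparse idiom, where the CLI flag name and the namespace `dest` both reuse one constant from `dataproc_templates.util.template_constants`. Below is a minimal, self-contained sketch of that idiom; the literal constant values are illustrative assumptions, not the repository's actual strings.

# Minimal sketch of the constants-driven argparse idiom seen in these prefixes.
# The constant values below are assumed for illustration only.
import argparse
from typing import Any, Dict, Optional, Sequence

JDBCTOJDBC_INPUT_URL = "jdbctojdbc.input.url"        # assumed value
JDBCTOJDBC_INPUT_DRIVER = "jdbctojdbc.input.driver"  # assumed value

def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
    parser = argparse.ArgumentParser()
    # The flag name and the namespace key reuse the same constant, so
    # downstream code can look values up by constant instead of by literal.
    parser.add_argument(f"--{JDBCTOJDBC_INPUT_URL}",
                        dest=JDBCTOJDBC_INPUT_URL, required=True)
    parser.add_argument(f"--{JDBCTOJDBC_INPUT_DRIVER}",
                        dest=JDBCTOJDBC_INPUT_DRIVER, required=True)
    known_args, _ = parser.parse_known_args(args)
    return vars(known_args)

print(parse_args([f"--{JDBCTOJDBC_INPUT_URL}", "jdbc:mysql://host/db",
                  f"--{JDBCTOJDBC_INPUT_DRIVER}", "com.mysql.cj.jdbc.Driver"]))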
185 | 12 | 3,488 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_TABLE | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.
|
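Each metadata row packs the evaluation verdicts for one cursor position: whether the ground-truth token appears among the candidates (`in_completions`), whether it starts with an underscore (`start_with_`), and, plausibly, whether this is the first row in the file targeting that token (`first_occurrence`; note row 185 above is true and row 186 below, with the same ground truth, is false). The sketch below is a hedged illustration of how such flags could be derived; it reflects the apparent field semantics, not the dataset's actual pipeline.

# Hedged illustration of deriving a row's boolean fields from the candidate
# list and the ground truth. The real pipeline may differ.
from typing import Dict, List, Set

def row_flags(ground_truth: str, candidates: List[str],
              seen_tokens: Set[str]) -> Dict[str, bool]:
    flags = {
        "in_completions": ground_truth in candidates,         # token offered at all?
        "start_with_": ground_truth.startswith("_"),          # private/dunder name?
        "first_occurrence": ground_truth not in seen_tokens,  # assumed meaning
    }
    seen_tokens.add(ground_truth)
    return flags

seen: Set[str] = set()
cands = ["JDBC_URL", "JDBC_TABLE", "JDBCTOJDBC_OUTPUT_TABLE"]
print(row_flags("JDBCTOJDBC_OUTPUT_TABLE", cands, seen))  # first_occurrence=True
print(row_flags("JDBCTOJDBC_OUTPUT_TABLE", cands, seen))  # first_occurrence=False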
186 | 12 | 3,542 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_TABLE | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.
|
187 | 12 | 3,702 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.
|
188 | 12 | 3,770 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.
|
189 | 12 | 4,051 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_MODE | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.
|
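Once `parse_args` has produced the option dict, a JDBC-to-JDBC template's run step would typically wire those values into Spark's JDBC reader and writer. The sketch below shows the shape of that wiring; it is not the repository's actual run() implementation, the dict keys are my own hypothetical names, and the option keys are Spark's documented JDBC options rather than the template's constants.

# Hedged sketch: feeding parsed JDBC options into Spark's JDBC source/sink.
# Not the template's actual run() body; dict keys below are hypothetical.
from pyspark.sql import SparkSession

def copy_jdbc_table(opts: dict) -> None:
    spark = SparkSession.builder.appName("jdbc-to-jdbc-sketch").getOrCreate()
    df = (spark.read.format("jdbc")
          .option("url", opts["input_url"])          # e.g. jdbc:mysql://host/db
          .option("driver", opts["input_driver"])
          .option("dbtable", opts["input_table"])
          .option("numPartitions", opts.get("numpartitions", 10))
          .load())
    (df.write.format("jdbc")
       .option("url", opts["output_url"])
       .option("driver", opts["output_driver"])
       .option("dbtable", opts["output_table"])
       .mode(opts.get("output_mode", "append"))      # append/overwrite/ignore/errorifexists
       .save())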
190 | 12 | 4,104 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_MODE | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.
|
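Note: the prefix above breaks off inside parse_args right after `dest=constants.` (the held-out token follows the dot). In these templates parse_args conventionally finishes by parsing the vector with parse_known_args and returning the namespace as a plain dict. The sketch below shows that tail under assumed constant values; it is illustrative, not the verbatim repository code.

import argparse
from typing import Any, Dict, Optional, Sequence

# Assumed stand-ins for the template_constants module referenced above.
JDBCTOJDBC_OUTPUT_MODE = "jdbctojdbc.output.mode"
OUTPUT_MODE_APPEND = "append"

def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
    parser = argparse.ArgumentParser()
    parser.add_argument(
        f"--{JDBCTOJDBC_OUTPUT_MODE}",
        dest=JDBCTOJDBC_OUTPUT_MODE,
        required=False,
        default=OUTPUT_MODE_APPEND,
        help="JDBC output write mode",
    )
    # parse_known_args tolerates extra flags the launcher may pass through.
    known_args, _ = parser.parse_known_args(args)
    return vars(known_args)  # argparse.Namespace -> Dict[str, Any]

print(parse_args([f"--{JDBCTOJDBC_OUTPUT_MODE}", "overwrite"]))
# {'jdbctojdbc.output.mode': 'overwrite'}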
191 | 12 | 4,186 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
OUTPUT_MODE_APPEND
| true |
statement
| 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.
|
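Each metadata row pairs the held-out token with boolean and count fields derived from the two completion lists that precede the prefix. A hedged sketch of how those derived fields could be recomputed from one row; the field names mirror this dump's columns, and the actual extraction pipeline is not shown in this document.

from typing import Dict, List

def derive_fields(ground_truth: str,
                  intellij: List[str],
                  jedi: List[Dict[str, str]]) -> Dict[str, object]:
    # jedi entries are {"name": ..., "type": ...} objects; intellij is plain strings.
    jedi_names = [entry["name"] for entry in jedi]
    def non_dunder(names: List[str]) -> int:
        return sum(1 for n in names if not n.startswith("__"))
    return {
        "in_completions": ground_truth in intellij or ground_truth in jedi_names,
        "non_dunder_count_intellij": non_dunder(intellij),
        "non_dunder_count_jedi": non_dunder(jedi_names),
        "start_with_": ground_truth.startswith("_"),
    }

print(derive_fields("OUTPUT_MODE_APPEND",
                    ["OUTPUT_MODE_APPEND", "JDBC_URL"],
                    [{"name": "OUTPUT_MODE_APPEND", "type": "statement"}]))
# {'in_completions': True, 'non_dunder_count_intellij': 2,
#  'non_dunder_count_jedi': 1, 'start_with_': False}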
192 | 12 | 4,431 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
OUTPUT_MODE_OVERWRITE
| true |
statement
| 76 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.
|
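The choices list being completed in the prefix above is what makes argparse reject unsupported write modes at parse time, instead of failing later inside Spark. A minimal self-contained illustration of that behavior; the constants' string values here are assumptions, not taken from the repository.

import argparse

OUTPUT_MODE_OVERWRITE = "overwrite"  # assumed string values
OUTPUT_MODE_APPEND = "append"
OUTPUT_MODE_IGNORE = "ignore"
OUTPUT_MODE_ERRORIFEXISTS = "errorifexists"

parser = argparse.ArgumentParser()
parser.add_argument(
    "--output.mode",
    dest="output_mode",
    default=OUTPUT_MODE_APPEND,
    choices=[
        OUTPUT_MODE_OVERWRITE,
        OUTPUT_MODE_APPEND,
        OUTPUT_MODE_IGNORE,
        OUTPUT_MODE_ERRORIFEXISTS,
    ],
)

print(parser.parse_args([]).output_mode)                           # 'append' (default)
print(parser.parse_args(["--output.mode", "ignore"]).output_mode)  # 'ignore'
# parser.parse_args(["--output.mode", "upsert"]) would exit with
# "argument --output.mode: invalid choice: 'upsert'".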
193 | 12 | 4,480 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
random
|
OUTPUT_MODE_APPEND
| true |
statement
| 76 | 75 | false | false |
[
"OUTPUT_MODE_IGNORE",
"JDBC_URL",
"JDBC_TABLE",
"OUTPUT_MODE_ERRORIFEXISTS",
"FORMAT_JDBC",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.
|
194 | 12 | 4,526 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
OUTPUT_MODE_IGNORE
| true |
statement
| 76 | 75 | false | true |
[
"OUTPUT_MODE_APPEND",
"JDBC_URL",
"JDBC_TABLE",
"OUTPUT_MODE_ERRORIFEXISTS",
"FORMAT_JDBC",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.
|
195 | 12 | 4,572 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
OUTPUT_MODE_ERRORIFEXISTS
| true |
statement
| 76 | 75 | false | true |
[
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_IGNORE",
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET",
"for"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.
|
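The partition column, lower/upper bound, and numPartitions arguments collected above correspond directly to Spark's JDBC data source options, which is how the template parallelizes the read. A hedged sketch of the read/write those arguments would configure; the URLs, driver, and table names are placeholders, while the option names are Spark's documented JDBC options.

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc-to-jdbc-sketch").getOrCreate()

# In the template these values come from the parsed args dict; placeholders here.
reader = (
    spark.read.format("jdbc")
    .option("url", "jdbc:postgresql://source-host:5432/db")
    .option("driver", "org.postgresql.Driver")
    .option("dbtable", "public.orders")
    .option("numPartitions", 10)
)
# The three partition options are only meaningful together: Spark splits
# [lowerBound, upperBound] on partitionColumn into numPartitions strides.
reader = (
    reader.option("partitionColumn", "id")
    .option("lowerBound", 1)
    .option("upperBound", 1_000_000)
)
df = reader.load()

(
    df.write.format("jdbc")
    .option("url", "jdbc:postgresql://target-host:5432/db")
    .option("driver", "org.postgresql.Driver")
    .option("dbtable", "public.orders_copy")
    .option("batchsize", 1000)  # rows per JDBC insert round trip
    .mode("append")             # one of the four modes validated above
    .save()
)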
196 | 12 | 4,678 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_OUTPUT_BATCH_SIZE
| true |
statement
| 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
            help='The maximum number of partitions that can be used for parallelism in table reading and writing. Defaults to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.
|
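The row above ends at f'--{constants. and its ground truth completes the flag to JDBCTOJDBC_OUTPUT_BATCH_SIZE. Since every argparse dest in this template is the constant's value, parse_args returns a plain dict keyed by those same values. A minimal usage sketch, assuming the dataproc_templates package is importable and using hypothetical connection strings (the literal flag values live in template_constants and are not shown in these rows):

from dataproc_templates.jdbc.jdbc_to_jdbc import JDBCToJDBCTemplate
import dataproc_templates.util.template_constants as constants

# Only the six required flags are passed; the optional ones fall back to
# their argparse defaults (empty strings, 10 partitions, append mode).
args = JDBCToJDBCTemplate.parse_args([
    f'--{constants.JDBCTOJDBC_INPUT_URL}', 'jdbc:mysql://src-host:3306/db',   # hypothetical URL
    f'--{constants.JDBCTOJDBC_INPUT_DRIVER}', 'com.mysql.cj.jdbc.Driver',
    f'--{constants.JDBCTOJDBC_INPUT_TABLE}', 'src_table',
    f'--{constants.JDBCTOJDBC_OUTPUT_URL}', 'jdbc:mysql://dst-host:3306/db',  # hypothetical URL
    f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}', 'com.mysql.cj.jdbc.Driver',
    f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}', 'dst_table',
])
# parse_args returns vars(known_args), so lookups use the constants directly:
assert args[constants.JDBCTOJDBC_INPUT_TABLE] == 'src_table'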
197 | 12 | 4,737 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_OUTPUT_BATCH_SIZE
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"OUTPUT_MODE_APPEND",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
            help='The maximum number of partitions that can be used for parallelism in table reading and writing. Defaults to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.
|
198 | 12 | 5,124 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
inproject
|
get_logger
| true |
function
| 4 | 4 | false | true |
[
"parse_args",
"run",
"get_logger",
"build",
"__str__",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
            help='The maximum number of partitions that can be used for parallelism in table reading and writing. Defaults to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
            help='JDBC output batch size. Defaults to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.
|
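The row above completes self. to get_logger inside run(); BaseTemplate.get_logger itself never appears in these prefixes. A minimal stand-in, assuming it only needs to hand back a standard logging.Logger (the shipped implementation may instead delegate to Spark's JVM-side log4j):

import logging
from pyspark.sql import SparkSession

def get_logger(self, spark: SparkSession) -> logging.Logger:
    # Hypothetical helper: name the logger after the concrete template class
    # (e.g. JDBCToJDBCTemplate) and leave the SparkSession unused.
    logger = logging.getLogger(type(self).__name__)
    logger.setLevel(logging.INFO)
    return logger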
199 | 12 | 5,214 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_INPUT_URL
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_URL",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
            help='The maximum number of partitions that can be used for parallelism in table reading and writing. Defaults to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
            help='JDBC output batch size. Defaults to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.
|
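None of the prefixes in this section reach past the argument lookups at the top of run(). Based purely on the parsed arguments, a plausible continuation (a sketch, not the repository's verbatim code; it assumes constants.FORMAT_JDBC names Spark's "jdbc" data source) is:

# Read the source table over JDBC, then write it out over JDBC using the
# user-selected save mode and batch size.
input_data: DataFrame = (
    spark.read
    .format(constants.FORMAT_JDBC)
    .option("url", input_jdbc_url)
    .option("driver", input_jdbc_driver)
    .option("dbtable", input_jdbc_table)
    .load()
)
(
    input_data.write
    .format(constants.FORMAT_JDBC)
    .option("url", args[constants.JDBCTOJDBC_OUTPUT_URL])
    .option("driver", args[constants.JDBCTOJDBC_OUTPUT_DRIVER])
    .option("dbtable", args[constants.JDBCTOJDBC_OUTPUT_TABLE])
    .option("batchsize", args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE])  # Spark JDBC write option
    .mode(args[constants.JDBCTOJDBC_OUTPUT_MODE])
    .save()
)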
200 | 12 | 5,284 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_INPUT_DRIVER
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_DRIVER",
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBCTOJDBC_INPUT_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
            help='The maximum number of partitions that can be used for parallelism in table reading and writing. Defaults to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
            help='JDBC output batch size. Defaults to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.
|
201 | 12 | 5,356 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_INPUT_TABLE
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_DRIVER",
"JDBCTOJDBC_INPUT_TABLE",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
            help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
            help='JDBC output batch size. Defaults to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.
|
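Row 202 below completes the next lookup to JDBCTOJDBC_INPUT_PARTITIONCOLUMN. In Spark's JDBC source the partition column is only meaningful together with lowerBound, upperBound and numPartitions, and this template defaults the column and both bounds to empty strings, so a hedged sketch of the read path would guard on all three (again an assumption, not the repository's code):

partition_column = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
lower_bound = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
upper_bound = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]

reader = (
    spark.read.format(constants.FORMAT_JDBC)
    .option("url", input_jdbc_url)
    .option("driver", input_jdbc_driver)
    .option("dbtable", input_jdbc_table)
    .option("numPartitions", args[constants.JDBCTOJDBC_NUMPARTITIONS])
)
# partitionColumn, lowerBound and upperBound must be supplied together,
# otherwise Spark rejects the read; fall back to a single-partition scan.
if partition_column and lower_bound and upper_bound:
    reader = (
        reader.option("partitionColumn", partition_column)
        .option("lowerBound", lower_bound)
        .option("upperBound", upper_bound)
    )
input_data = reader.load()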
202 | 12 | 5,437 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBCTOJDBC_INPUT_PARTITIONCOLUMN
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_PARTITIONCOLUMN",
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.
|
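Aside: the parse_args prefix reproduced in row 202 above is complete enough to exercise on its own. A minimal usage sketch follows, assuming the module path implied by the target_file column; every literal option value below is an illustrative placeholder, not something taken from the repo.

# Hypothetical smoke test for JDBCToJDBCTemplate.parse_args. The six required
# flags are supplied; everything else falls back to the declared defaults.
import dataproc_templates.util.template_constants as constants
from dataproc_templates.jdbc.jdbc_to_jdbc import JDBCToJDBCTemplate

args = JDBCToJDBCTemplate.parse_args([
    f'--{constants.JDBCTOJDBC_INPUT_URL}', 'jdbc:mysql://host/db',
    f'--{constants.JDBCTOJDBC_INPUT_DRIVER}', 'com.mysql.cj.jdbc.Driver',
    f'--{constants.JDBCTOJDBC_INPUT_TABLE}', 'src_table',
    f'--{constants.JDBCTOJDBC_OUTPUT_URL}', 'jdbc:postgresql://host/db',
    f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}', 'org.postgresql.Driver',
    f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}', 'dst_table',
])
# Unspecified optional flags take their defaults, e.g. the output mode:
print(args[constants.JDBCTOJDBC_OUTPUT_MODE])  # constants.OUTPUT_MODE_APPEND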
203 | 12 | 5,523 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_LOWERBOUND | true | statement | 75 | 75 | false | false |
[
"JDBC_LOWERBOUND",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.
|
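Rows 203 and 204 cover the partition-bound lookups. As the help strings note, these bounds shape parallelism rather than filter rows: Spark splits the [lowerBound, upperBound] range of the partition column into numPartitions ranges of roughly equal stride. A worked sketch of that arithmetic, with made-up numbers:

# Illustrative stride computation only -- Spark performs the equivalent split
# internally when all four JDBC partitioning options are supplied on a read.
lower, upper, num_partitions = 0, 10_000, 10
stride = (upper - lower) // num_partitions
print(stride)  # 1000: each task scans ~1000 values of the partition column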
204 | 12 | 5,604 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_INPUT_UPPERBOUND | true | statement | 75 | 75 | false | false |
[
"JDBC_UPPERBOUND",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.
|
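Each row carries everything needed to score a completion engine: the ground_truth identifier plus the two candidate lists. A small helper of the kind one might write against this dump (hypothetical, not part of the dataset tooling):

from typing import List, Optional

def rank_of(ground_truth: str, candidates: List[str]) -> Optional[int]:
    # 1-based rank of the ground truth in a candidate list; None if absent.
    try:
        return candidates.index(ground_truth) + 1
    except ValueError:
        return None

# Row 204 above: JDBCTOJDBC_INPUT_UPPERBOUND appears mid-list for intellij,
# so in_completions is true even though the top-1 suggestion is wrong.
print(rank_of('JDBC_UPPERBOUND', ['JDBC_UPPERBOUND', 'JDBC_URL']))  # 1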
205 | 12 | 5,682 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_NUMPARTITIONS | true | statement | 75 | 75 | false | false |
[
"JDBC_NUMPARTITIONS",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.
|
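Note the contrast between the two candidate columns in every row so far: intellij_completions is relevance-ranked (the near-miss JDBC_* twin of the target leads the list in rows 202 through 205), while jedi_completions is plain alphabetical with dunder members appended last. A one-line sanity check over an excerpt of a parsed row:

# Excerpt of a jedi list; the full column preserves alphabetical order, which
# makes raw rank-based metrics uninformative for jedi without re-scoring.
jedi_names = ['BQ_GCS_INPUT_TABLE', 'BQ_GCS_OUTPUT_FORMAT', 'CSV_HEADER', 'FORMAT_AVRO']
assert jedi_names == sorted(jedi_names)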
206 | 12 | 5,754 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_URL | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.
|
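Row 206's prefix stops at the output URL lookup (ground truth JDBCTOJDBC_OUTPUT_URL), and row 207's ground truth continues to the output driver. A plausible continuation of run() past that point is sketched below. It is an assumption built only from the constants visible in the candidate lists and from Spark's standard JDBC reader/writer options, not from the repo itself; the input_jdbc_* and jdbc_numpartitions variables are the ones already unpacked in the prefix.

# Hypothetical remainder of JDBCToJDBCTemplate.run(); not dataset ground truth.
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]

reader = spark.read.format(constants.FORMAT_JDBC) \
    .option(constants.JDBC_URL, input_jdbc_url) \
    .option(constants.JDBC_DRIVER, input_jdbc_driver) \
    .option(constants.JDBC_TABLE, input_jdbc_table)
if input_jdbc_partitioncolumn:
    # The four Spark JDBC partitioning options must travel together.
    reader = reader.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
        .option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
        .option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
        .option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions)
input_data: DataFrame = reader.load()

# DataFrameWriter.mode accepts exactly the strings argparse restricts to:
# append, overwrite, ignore, errorifexists.
input_data.write.format(constants.FORMAT_JDBC) \
    .option(constants.JDBC_URL, output_jdbc_url) \
    .option(constants.JDBC_DRIVER, output_jdbc_driver) \
    .option(constants.JDBC_TABLE, output_jdbc_table) \
    .mode(output_jdbc_mode) \
    .save()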
207 | 12 | 5,826 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBCTOJDBC_OUTPUT_DRIVER | true | statement | 75 | 75 | false | false |
[
"JDBC_DRIVER",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.
|
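Each record here follows the same pattern: the prefix cell reproduces python/dataproc_templates/jdbc/jdbc_to_jdbc.py verbatim up to the completion point (the record just ended cuts off mid-way through "output_jdbc_driver: str = args[constants.", and the next record's longer prefix shows it resolved to constants.JDBCTOJDBC_OUTPUT_DRIVER), ground_truth names the identifier expected at that point, and the two candidate lists are IntelliJ's and jedi's suggestions there. A scorer for such records can be as small as the sketch below; the helper name and the abridged list are mine, for illustration only.

# Illustrative scorer for records in this dump: checks whether ground_truth
# appears in an engine's candidate list and at which 1-based rank.
from typing import List, Optional

def rank_of(ground_truth: str, candidates: List[str]) -> Optional[int]:
    """Return the 1-based rank of the expected identifier, or None if absent."""
    try:
        return candidates.index(ground_truth) + 1
    except ValueError:
        return None

# Abridged head of row 208's IntelliJ list; in the full 75-name list the
# ground truth JDBCTOJDBC_OUTPUT_TABLE appears further down.
intellij_head = ["JDBC_TABLE", "JDBC_URL", "FORMAT_JDBC", "JDBC_DRIVER"]
assert rank_of("JDBC_TABLE", intellij_head) == 1
assert rank_of("JDBCTOJDBC_OUTPUT_TABLE", intellij_head) is None  # not in the head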
idx: 208 | idx_lca: 12 | offset: 5900 | repo: googlecloudplatform__dataproc-templates | commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown | ground_truth: JDBCTOJDBC_OUTPUT_TABLE | in_completions: true | completion_type: statement | non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75 | start_with_: false | first_occurrence: false
[
"JDBC_TABLE",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.
|
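Rows 208 through 211 truncate the same run() method one assignment further each time, so their ground-truth completions can be read off by comparing consecutive prefixes. Assembled, the argument-unpacking tail of run() continues as follows (row 211's line is confirmed only by its ground_truth, since this section's last prefix is cut short before reaching run()):

output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]                              # row 208
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]  # row 209
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]                                # row 210
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]                    # row 211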
idx: 209 | idx_lca: 12 | offset: 5987 | repo: googlecloudplatform__dataproc-templates | commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown | ground_truth: JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION | in_completions: true | completion_type: statement | non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75 | start_with_: false | first_occurrence: false
[
"JDBC_TABLE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.
|
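Note how differently the two engines order their candidates: jedi returns the constants module's names in (case-insensitive) alphabetical order, while IntelliJ ranks by contextual relevance, so JDBC_TABLE and JDBCTOJDBC_OUTPUT_TABLE lead row 209's list and OUTPUT_MODE_OVERWRITE climbs toward the top of row 210's. A quick property check, written purely for illustration:

# Illustration only: the jedi head is in sorted order, the IntelliJ head is not.
def is_alphabetical(names: list[str]) -> bool:
    return names == sorted(names, key=str.lower)

jedi_head = ["BQ_GCS_INPUT_TABLE", "BQ_GCS_OUTPUT_FORMAT", "BQ_GCS_OUTPUT_LOCATION"]
intellij_head = ["JDBC_TABLE", "JDBCTOJDBC_OUTPUT_TABLE", "JDBC_URL"]
assert is_alphabetical(jedi_head)
assert not is_alphabetical(intellij_head)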
idx: 210 | idx_lca: 12 | offset: 6073 | repo: googlecloudplatform__dataproc-templates | commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown | ground_truth: JDBCTOJDBC_OUTPUT_MODE | in_completions: true | completion_type: statement | non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75 | start_with_: false | first_occurrence: false
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_TABLE",
"OUTPUT_MODE_OVERWRITE",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.
|
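Row 210's ground truth, JDBCTOJDBC_OUTPUT_MODE, is consumed by Spark's DataFrameWriter: the parser's choices list mirrors the writer's four save modes, with append as the default. The dump truncates run() before the write path, so the following is only a sketch of how such a template plausibly finishes, using Spark's documented JDBC data-source options rather than code recovered from the repo:

# Sketch only -- option names follow Spark's documented JDBC options; the
# variables are the ones unpacked from args earlier in run().
(input_data.write
    .format(constants.FORMAT_JDBC)
    .option("url", output_jdbc_url)
    .option("driver", output_jdbc_driver)
    .option("dbtable", output_jdbc_table)
    .option("createTableOptions", output_jdbc_create_table_option)
    .option("batchsize", output_jdbc_batch_size)
    .option("numPartitions", jdbc_numpartitions)
    .mode(output_jdbc_mode)
    .save())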
idx: 211 | idx_lca: 12 | offset: 6150 | repo: googlecloudplatform__dataproc-templates | commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown | ground_truth: JDBCTOJDBC_OUTPUT_BATCH_SIZE | in_completions: true | completion_type: statement | non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75 | start_with_: false | first_occurrence: false
[
"JDBC_BATCH_SIZE",
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.
|
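Row 212 below is the only record in this stretch flagged first_occurrence = true: its ground truth, FORMAT_JDBC, marks the point where run() turns from unpacking arguments to reading the source table. A plausible continuation, assuming a conventional Spark JDBC read (the partition options would only be set when a partition column was actually supplied):

# Sketch under the FORMAT_JDBC assumption from row 212's ground_truth; reader
# options are Spark's documented JDBC options, not recovered code.
input_data: DataFrame = (spark.read
    .format(constants.FORMAT_JDBC)
    .option("url", input_jdbc_url)
    .option("driver", input_jdbc_driver)
    .option("dbtable", input_jdbc_table)
    .option("partitionColumn", input_jdbc_partitioncolumn)
    .option("lowerBound", input_jdbc_lowerbound)
    .option("upperBound", input_jdbc_upperbound)
    .option("numPartitions", jdbc_numpartitions)
    .load())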
idx: 212 | idx_lca: 12 | offset: 6987 | repo: googlecloudplatform__dataproc-templates | commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown | ground_truth: FORMAT_JDBC | in_completions: true | completion_type: statement | non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75 | start_with_: false | first_occurrence: true
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.
|
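The prefix above stops at the completion point inside the non-partitioned read branch. Taken together with the ground-truth values recorded in the rows that follow (FORMAT_JDBC, JDBC_URL, JDBC_DRIVER, JDBC_TABLE, JDBC_NUMPARTITIONS), the chain being completed is a plain Spark JDBC read. A minimal, self-contained sketch of that read, assuming the template's constants resolve to Spark's literal JDBC option keys ("url", "driver", "dbtable", "numPartitions"); the connection values are placeholders, not repository code:

# A minimal sketch, not repository code: the connection URL, driver class,
# and table name below are placeholders.
from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc-read-sketch").getOrCreate()

input_data = spark.read \
    .format("jdbc") \
    .option("url", "jdbc:postgresql://host:5432/db?user=u&password=p") \
    .option("driver", "org.postgresql.Driver") \
    .option("dbtable", "source_table") \
    .option("numPartitions", "10") \
    .load()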
213 | 12 | 7,036 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_URL | true | statement | 75 | 75 | false | true |
[
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.
|
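Every prefix in these rows carries the same parse_args implementation, which leans on parse_known_args plus vars() so that flags the template does not declare are ignored rather than rejected, and the parsed result comes back as a plain dict keyed by the constant values. A small self-contained illustration of that pattern; the dotted flag name 'jdbctojdbc.input.url' is inferred from the jdbctojdbc.input.* names quoted in the template's own error message, not read from the constants module:

# Self-contained illustration of the parse_known_args + vars() pattern;
# the dotted flag name is an inference from the template's error message.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--jdbctojdbc.input.url', dest='jdbctojdbc.input.url', required=True)

known, unknown = parser.parse_known_args(
    ['--jdbctojdbc.input.url', 'jdbc:postgresql://host/db', '--unrelated.flag', 'x']
)
print(vars(known))    # {'jdbctojdbc.input.url': 'jdbc:postgresql://host/db'}
print(unknown)        # ['--unrelated.flag', 'x']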
214 | 12 | 7,098 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_DRIVER | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.
|
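One detail worth flagging in the shared run() body: the partition-parameter guard originally combined its string comparisons with bitwise &/|, which happens to work because ==/!= return booleans, but reads poorly; the clarified version above uses and/or. The same check can be stated even more directly, as in this sketch (the function name, message text, and sys.exit are choices of this sketch, not the template's):

# Equivalent restatement of the guard: either all three partition
# parameters are supplied or none of them. Names here are illustrative.
import sys

def check_partition_params(column: str, lower: str, upper: str) -> None:
    given = [column, lower, upper]
    if any(given) and not all(given):
        print("Supply partitioncolumn, lowerbound and upperbound together, or none of them.")
        sys.exit(1)

check_partition_params("", "", "")  # ok: none of the three supplied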
215 | 12 | 7,166 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_TABLE | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.
|
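The prefixes all truncate before the else branch of the read, but the lowerbound/upperbound help strings and the JDBC_PARTITIONCOLUMN / JDBC_LOWERBOUND / JDBC_UPPERBOUND constants in the completion lists point at Spark's partitioned JDBC read. A sketch of that standard pattern, reusing the local names bound earlier in run(); the exact chain in the repository is an assumption here:

# Presumed shape of the partitioned read taken when all three partition
# parameters are set; the option keys are Spark's, the branch itself is
# an assumption rather than quoted repository code.
input_data = spark.read \
    .format("jdbc") \
    .option("url", input_jdbc_url) \
    .option("driver", input_jdbc_driver) \
    .option("dbtable", input_jdbc_table) \
    .option("partitionColumn", input_jdbc_partitioncolumn) \
    .option("lowerBound", input_jdbc_lowerbound) \
    .option("upperBound", input_jdbc_upperbound) \
    .option("numPartitions", jdbc_numpartitions) \
    .load()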
216 | 12 | 7,232 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_NUMPARTITIONS | true | statement | 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_CREATE_TABLE_OPTIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.
|
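The next row's ground truth (FORMAT_JDBC, first occurrence false, at a deeper offset) points at the write half of run(), which none of the prefixes here reach. A hedged sketch of a typical Spark JDBC write over the remaining arguments; "batchsize" and "createTableOptions" are Spark's JDBC option keys, which JDBC_BATCH_SIZE and JDBC_CREATE_TABLE_OPTIONS presumably name, and the exact chain in the repository may differ:

# Plausible write half of run(), continuing with the locals bound above;
# a sketch under the stated assumptions, not quoted repository code.
input_data.write \
    .format("jdbc") \
    .option("url", output_jdbc_url) \
    .option("driver", output_jdbc_driver) \
    .option("dbtable", output_jdbc_table) \
    .option("createTableOptions", output_jdbc_create_table_option) \
    .option("batchsize", output_jdbc_batch_size) \
    .mode(output_jdbc_mode) \
    .save()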
217 | 12 | 7,382 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | FORMAT_JDBC | true | statement | 75 | 75 | false | false |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.
|
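The ground truth for the record above is FORMAT_JDBC, which presumably resolves to Spark's built-in datasource name "jdbc". PySpark exposes the same source through the DataFrameReader.jdbc shorthand as well; a small sketch showing both spellings, with placeholder connection values:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc-format-sketch").getOrCreate()

# The two reads below address the same datasource in equivalent ways.
df_a = spark.read.format("jdbc") \
    .option("url", "jdbc:postgresql://db-host:5432/sourcedb") \
    .option("driver", "org.postgresql.Driver") \
    .option("dbtable", "source_table") \
    .load()

df_b = spark.read.jdbc(
    url="jdbc:postgresql://db-host:5432/sourcedb",
    table="source_table",
    properties={"driver": "org.postgresql.Driver"},
)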
218 | 12 | 7,431 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBC_URL
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.
|
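The record above completes to JDBC_URL. Connection credentials can ride inside the JDBC URL itself, but Spark's JDBC source also accepts them as the separate "user" and "password" options, which keeps the URL cleaner; a hedged sketch with placeholder values:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc-url-sketch").getOrCreate()

# Credentials passed as dedicated options rather than embedded in the URL.
df = spark.read.format("jdbc") \
    .option("url", "jdbc:mysql://db-host:3306/sourcedb") \
    .option("user", "etl_user") \
    .option("password", "etl_password") \
    .option("driver", "com.mysql.cj.jdbc.Driver") \
    .option("dbtable", "source_table") \
    .load()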
219 | 12 | 7,493 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBC_DRIVER
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.
|
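The record above completes to JDBC_DRIVER. The "driver" option only names the JDBC driver class; the jar that provides that class must also be on the Spark classpath, typically via spark.jars or --jars. A sketch, where the jar path and version are placeholder assumptions:

from pyspark.sql import SparkSession

# The driver jar is supplied to the session; the "driver" option then
# selects the class to load from it.
spark = SparkSession.builder \
    .appName("jdbc-driver-sketch") \
    .config("spark.jars", "/opt/jars/postgresql-42.6.0.jar") \
    .getOrCreate()

df = spark.read.format("jdbc") \
    .option("url", "jdbc:postgresql://db-host:5432/sourcedb") \
    .option("driver", "org.postgresql.Driver") \
    .option("dbtable", "source_table") \
    .load()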
220 | 12 | 7,561 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBC_TABLE
| true |
statement
| 75 | 75 | false | false |
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if (partition_parameters != "") and ((input_jdbc_partitioncolumn == "") or (input_jdbc_lowerbound == "") or (input_jdbc_upperbound == "")):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.
|
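The record above completes to JDBC_TABLE. Spark's "dbtable" option accepts either a bare table name or a parenthesized subquery with an alias, which lets the source database do filtering and projection before any rows cross the wire; a sketch with hypothetical table and column names:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName("jdbc-table-sketch").getOrCreate()

# A subquery in place of a table name: the WHERE clause runs in the
# database, not in Spark.
df = spark.read.format("jdbc") \
    .option("url", "jdbc:mysql://db-host:3306/sourcedb") \
    .option("driver", "com.mysql.cj.jdbc.Driver") \
    .option("dbtable", "(SELECT id, amount FROM orders WHERE amount > 0) AS src") \
    .load()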
221 | 12 | 7,627 |
googlecloudplatform__dataproc-templates
|
4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
|
python/dataproc_templates/jdbc/jdbc_to_jdbc.py
|
Unknown
|
JDBC_PARTITIONCOLUMN
| true |
statement
| 75 | 75 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.
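Each record's prefix is cut at the completion point; here the read chain stops where JDBC_PARTITIONCOLUMN is about to be inserted (the next record's prefix confirms this). For reference, the fully assembled partitioned read, reconstructed verbatim from the longer prefixes later in this section (variable names as bound earlier in run()):

# Completed partitioned read, reconstructed from later prefixes in this
# section; every option name below appears verbatim in a subsequent record.
input_data = spark.read \
    .format(constants.FORMAT_JDBC) \
    .option(constants.JDBC_URL, input_jdbc_url) \
    .option(constants.JDBC_DRIVER, input_jdbc_driver) \
    .option(constants.JDBC_TABLE, input_jdbc_table) \
    .option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
    .option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
    .option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
    .option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
    .load()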
idx: 222 | idx_lca: 12 | offset: 7713
repo: googlecloudplatform__dataproc-templates
commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown
ground_truth: JDBC_LOWERBOUND
in_completions: true | completion_type: statement
non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75
start_with_: false | first_occurrence: true
intellij_completions:
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
]
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix:
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.
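The ground truth for this record is JDBC_LOWERBOUND. As the help text above says, the lower bound, upper bound, and partition count together decide the partition stride of the read. A simplified sketch of how a column range splits into strides (illustrative only; Spark's actual JDBCRelation emits WHERE predicates and leaves the first and last partitions open-ended):

# Simplified model of the stride computation behind Spark's partitioned
# JDBC reads; illustrative only, not part of the template.
def partition_ranges(lower: int, upper: int, num_partitions: int):
    stride = (upper - lower) // num_partitions
    ranges = []
    start = lower
    for i in range(num_partitions):
        end = upper if i == num_partitions - 1 else start + stride
        ranges.append((start, end))
        start = end
    return ranges

print(partition_ranges(1, 100001, 10))  # ten ranges, each of stride 10000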
idx: 223 | idx_lca: 12 | offset: 7789
repo: googlecloudplatform__dataproc-templates
commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown
ground_truth: JDBC_UPPERBOUND
in_completions: true | completion_type: statement
non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75
start_with_: false | first_occurrence: true
intellij_completions:
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
]
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix:
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.
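For context on how these templates are driven, the three partitioning flags are spelled out verbatim in the error message above (jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound). A hedged sketch of exercising parse_args directly; the spellings of the remaining flags are assumptions inferred by analogy, since the template_constants module itself is not shown here:

# Hedged sketch: the url/driver/table flag spellings below are assumptions;
# only the three partitioning flags are named verbatim in the template's
# own error message.
args = JDBCToJDBCTemplate.parse_args([
    '--jdbctojdbc.input.url=jdbc:postgresql://host/db?user=u&password=p',
    '--jdbctojdbc.input.driver=org.postgresql.Driver',
    '--jdbctojdbc.input.table=public.users',
    '--jdbctojdbc.input.partitioncolumn=id',
    '--jdbctojdbc.input.lowerbound=1',
    '--jdbctojdbc.input.upperbound=100001',
    '--jdbctojdbc.output.url=jdbc:postgresql://host/db_copy?user=u&password=p',
    '--jdbctojdbc.output.driver=org.postgresql.Driver',
    '--jdbctojdbc.output.table=public.users_copy',
])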
idx: 224 | idx_lca: 12 | offset: 7865
repo: googlecloudplatform__dataproc-templates
commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown
ground_truth: JDBC_NUMPARTITIONS
in_completions: true | completion_type: statement
non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75
start_with_: false | first_occurrence: false
intellij_completions:
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_CREATE_TABLE_OPTIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_LOWERBOUND",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
]
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix:
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.
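A side note on the guard in run(): it concatenates the three partitioning parameters into one string and combines comparisons with the bitwise operators & and |, which happen to work on booleans but do not short-circuit. An equivalent, more idiomatic formulation (a sketch, not a change to the template):

# Equivalent guard: require the three partitioning parameters together or
# not at all. any()/all() replace the string concatenation and the bitwise
# operators; sys.exit replaces the bare exit() builtin the template calls.
import sys

partition_params = (input_jdbc_partitioncolumn,
                    input_jdbc_lowerbound,
                    input_jdbc_upperbound)
if any(partition_params) and not all(partition_params):
    logger.error("Set all the sql partitioning parameters together: "
                 "jdbctojdbc.input.partitioncolumn, "
                 "jdbctojdbc.input.lowerbound, "
                 "jdbctojdbc.input.upperbound.")
    sys.exit(1)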
idx: 225 | idx_lca: 12 | offset: 8005
repo: googlecloudplatform__dataproc-templates
commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown
ground_truth: FORMAT_JDBC
in_completions: true | completion_type: statement
non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75
start_with_: false | first_occurrence: false
intellij_completions:
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
]
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
]
prefix:
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating a output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters=str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if ((partition_parameters != "") & ((input_jdbc_partitioncolumn == "") | (input_jdbc_lowerbound == "") | (input_jdbc_upperbound == ""))):
logger.error("Set all the sql partitioning parameters together-jdbctojdbc.input.partitioncolumn,jdbctojdbc.input.lowerbound,jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit (1)
elif (partition_parameters == ""):
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data=spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.
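Here the prefix is cut at the start of the write path. This record's ground truth confirms FORMAT_JDBC, and the next record's ground truth confirms JDBC_URL as the first option; the rest of the chain below is an assumption that the write mirrors the read, using the output_* values bound earlier in run():

# Plausible completion of the write path. Only FORMAT_JDBC and JDBC_URL are
# confirmed by the records here; the remaining options are assumptions that
# mirror the read path and the batch/create-table arguments parsed above.
input_data.write \
    .format(constants.FORMAT_JDBC) \
    .option(constants.JDBC_URL, output_jdbc_url) \
    .option(constants.JDBC_DRIVER, output_jdbc_driver) \
    .option(constants.JDBC_TABLE, output_jdbc_table) \
    .option(constants.JDBC_CREATE_TABLE_OPTIONS, output_jdbc_create_table_option) \
    .option(constants.JDBC_BATCH_SIZE, output_jdbc_batch_size) \
    .option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
    .mode(output_jdbc_mode) \
    .save()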
idx: 226 | idx_lca: 12 | offset: 8050
repo: googlecloudplatform__dataproc-templates
commit_hash: 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f
target_file: python/dataproc_templates/jdbc/jdbc_to_jdbc.py
line_type_lca: Unknown
ground_truth: JDBC_URL
in_completions: true | completion_type: statement
non_dunder_count_intellij: 75 | non_dunder_count_jedi: 75
start_with_: false | first_occurrence: false
intellij_completions:
[
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
]
jedi_completions:
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if partition_parameters != "" and (input_jdbc_partitioncolumn == "" or input_jdbc_lowerbound == "" or input_jdbc_upperbound == ""):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif partition_parameters == "":
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.
|
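Note: the lowerbound/upperbound help strings in the prefix above refer to the partition stride Spark derives for parallel JDBC reads. A minimal illustrative sketch of that idea follows (an assumption-laden toy: it mirrors the concept only, not Spark's actual boundary logic, and partition_predicates is a made-up helper):

from typing import List

def partition_predicates(column: str, lower: int, upper: int, num_partitions: int) -> List[str]:
    # Illustrative only (NOT Spark's exact algorithm): turn a partition column
    # plus lowerBound/upperBound/numPartitions into per-partition predicates.
    stride = max((upper - lower) // num_partitions, 1)
    predicates = []
    for i in range(num_partitions):
        lo = lower + i * stride
        if i == 0:
            # first partition also catches rows below lowerBound
            predicates.append(f"{column} < {lo + stride} OR {column} IS NULL")
        elif i == num_partitions - 1:
            # last partition catches rows above upperBound
            predicates.append(f"{column} >= {lo}")
        else:
            predicates.append(f"{column} >= {lo} AND {column} < {lo + stride}")
    return predicates

print(partition_predicates("id", 1, 100, 4))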
227 | 12 | 8,109 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_DRIVER | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if partition_parameters != "" and (input_jdbc_partitioncolumn == "" or input_jdbc_lowerbound == "" or input_jdbc_upperbound == ""):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif partition_parameters == "":
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.
|
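Note: run() in the prefix above enforces an all-or-nothing rule for the three partitioning options by concatenating them into one string. A hypothetical, behavior-equivalent check for comparison (sketch only; validate_partition_options is not part of the template):

from typing import Tuple

def validate_partition_options(column: str, lower: str, upper: str) -> bool:
    # True -> use a partitioned read; False -> plain read; raises if mixed.
    opts: Tuple[str, str, str] = (column, lower, upper)
    if any(opts) and not all(opts):
        raise ValueError(
            "Set jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound "
            "and jdbctojdbc.input.upperbound together, or none of them."
        )
    return all(opts)

print(validate_partition_options("", "", ""))        # False: plain read
print(validate_partition_options("id", "1", "100"))  # True: partitioned read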
228 | 12 | 8,174 | googlecloudplatform__dataproc-templates | 4d63cf08165dbee145ce8b26d7aa0b11ff4c5a8f | python/dataproc_templates/jdbc/jdbc_to_jdbc.py | Unknown | JDBC_TABLE | true | statement | 75 | 75 | false | false |
[
"JDBC_URL",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict, Sequence, Optional, Any
from logging import Logger
import argparse
import pprint
from pyspark.sql import SparkSession, DataFrame
from dataproc_templates import BaseTemplate
import dataproc_templates.util.template_constants as constants
__all__ = ['JDBCToJDBCTemplate']
class JDBCToJDBCTemplate(BaseTemplate):
"""
Dataproc template implementing loads from JDBC into JDBC
"""
@staticmethod
def parse_args(args: Optional[Sequence[str]] = None) -> Dict[str, Any]:
parser: argparse.ArgumentParser = argparse.ArgumentParser()
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_URL}',
dest=constants.JDBCTOJDBC_INPUT_URL,
required=True,
help='JDBC input URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_DRIVER}',
dest=constants.JDBCTOJDBC_INPUT_DRIVER,
required=True,
help='JDBC input driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_TABLE}',
dest=constants.JDBCTOJDBC_INPUT_TABLE,
required=True,
help='JDBC input table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN}',
dest=constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN,
required=False,
default="",
help='JDBC input table partition column name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_LOWERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_LOWERBOUND,
required=False,
default="",
help='JDBC input table partition column lower bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_INPUT_UPPERBOUND}',
dest=constants.JDBCTOJDBC_INPUT_UPPERBOUND,
required=False,
default="",
help='JDBC input table partition column upper bound which is used to decide the partition stride'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_NUMPARTITIONS}',
dest=constants.JDBCTOJDBC_NUMPARTITIONS,
required=False,
default=10,
help='The maximum number of partitions that can be used for parallelism in table reading and writing. Default set to 10'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_URL}',
dest=constants.JDBCTOJDBC_OUTPUT_URL,
required=True,
help='JDBC output URL'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_DRIVER}',
dest=constants.JDBCTOJDBC_OUTPUT_DRIVER,
required=True,
help='JDBC output driver name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_TABLE}',
dest=constants.JDBCTOJDBC_OUTPUT_TABLE,
required=True,
help='JDBC output table name'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION}',
dest=constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION,
required=False,
default="",
help='This option allows setting of database-specific table and partition options when creating an output table'
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_MODE}',
dest=constants.JDBCTOJDBC_OUTPUT_MODE,
required=False,
default=constants.OUTPUT_MODE_APPEND,
help=(
'Output write mode '
'(one of: append,overwrite,ignore,errorifexists) '
'(Defaults to append)'
),
choices=[
constants.OUTPUT_MODE_OVERWRITE,
constants.OUTPUT_MODE_APPEND,
constants.OUTPUT_MODE_IGNORE,
constants.OUTPUT_MODE_ERRORIFEXISTS
]
)
parser.add_argument(
f'--{constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE}',
dest=constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE,
required=False,
default=1000,
help='JDBC output batch size. Default set to 1000'
)
known_args: argparse.Namespace
known_args, _ = parser.parse_known_args(args)
return vars(known_args)
def run(self, spark: SparkSession, args: Dict[str, Any]) -> None:
logger: Logger = self.get_logger(spark=spark)
# Arguments
input_jdbc_url: str = args[constants.JDBCTOJDBC_INPUT_URL]
input_jdbc_driver: str = args[constants.JDBCTOJDBC_INPUT_DRIVER]
input_jdbc_table: str = args[constants.JDBCTOJDBC_INPUT_TABLE]
input_jdbc_partitioncolumn: str = args[constants.JDBCTOJDBC_INPUT_PARTITIONCOLUMN]
input_jdbc_lowerbound: str = args[constants.JDBCTOJDBC_INPUT_LOWERBOUND]
input_jdbc_upperbound: str = args[constants.JDBCTOJDBC_INPUT_UPPERBOUND]
jdbc_numpartitions: str = args[constants.JDBCTOJDBC_NUMPARTITIONS]
output_jdbc_url: str = args[constants.JDBCTOJDBC_OUTPUT_URL]
output_jdbc_driver: str = args[constants.JDBCTOJDBC_OUTPUT_DRIVER]
output_jdbc_table: str = args[constants.JDBCTOJDBC_OUTPUT_TABLE]
output_jdbc_create_table_option: str = args[constants.JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION]
output_jdbc_mode: str = args[constants.JDBCTOJDBC_OUTPUT_MODE]
output_jdbc_batch_size: int = args[constants.JDBCTOJDBC_OUTPUT_BATCH_SIZE]
logger.info(
"Starting JDBC to JDBC spark job with parameters:\n"
f"{pprint.pformat(args)}"
)
# Read
input_data: DataFrame
partition_parameters = str(input_jdbc_partitioncolumn) + str(input_jdbc_lowerbound) + str(input_jdbc_upperbound)
if partition_parameters != "" and (input_jdbc_partitioncolumn == "" or input_jdbc_lowerbound == "" or input_jdbc_upperbound == ""):
logger.error("Set all the SQL partitioning parameters together: jdbctojdbc.input.partitioncolumn, jdbctojdbc.input.lowerbound, jdbctojdbc.input.upperbound. Refer to README.md for more instructions.")
exit(1)
elif partition_parameters == "":
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
else:
input_data = spark.read \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, input_jdbc_url) \
.option(constants.JDBC_DRIVER, input_jdbc_driver) \
.option(constants.JDBC_TABLE, input_jdbc_table) \
.option(constants.JDBC_PARTITIONCOLUMN, input_jdbc_partitioncolumn) \
.option(constants.JDBC_LOWERBOUND, input_jdbc_lowerbound) \
.option(constants.JDBC_UPPERBOUND, input_jdbc_upperbound) \
.option(constants.JDBC_NUMPARTITIONS, jdbc_numpartitions) \
.load()
# Write
input_data.write \
.format(constants.FORMAT_JDBC) \
.option(constants.JDBC_URL, output_jdbc_url) \
.option(constants.JDBC_DRIVER, output_jdbc_driver) \
.option(constants.
|
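Note: parse_args() in the prefix above leans on two argparse behaviors: dest may be an arbitrary dotted string, and parse_known_args() tolerates flags it does not recognize. A standalone sketch of both (the example.* flag names are hypothetical, not part of the templates):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--example.input.url", dest="example.input.url", required=True)
# parse_known_args() returns (namespace, leftover_flags) instead of erroring
# on unknown options, which lets several templates share one argv.
known, unknown = parser.parse_known_args(
    ["--example.input.url=jdbc:demo", "--other.flag=ignored"]
)
print(vars(known))   # {'example.input.url': 'jdbc:demo'}
print(unknown)       # ['--other.flag=ignored']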
230 | 13 | 1,013 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | inproject | parse_args | true | function | 4 | 4 | false | true |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.
|
231 | 13 | 2,586 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
|
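Note: the @mock.patch.object decorator in the prefix above swaps pyspark.sql.SparkSession for a MagicMock for the duration of the test, so no real Spark cluster is needed. A minimal sketch of the same pattern (requires the mock and pyspark packages the test already imports):

import mock
import pyspark

@mock.patch.object(pyspark.sql, "SparkSession")
def check(mock_spark_session):
    # Inside the patch, the attribute really is the injected mock.
    assert pyspark.sql.SparkSession is mock_spark_session

check()  # the decorator supplies mock_spark_session automatically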
232 | 13 | 3,291 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | run | true | function | 4 | 4 | false | true |
[
"run",
"parse_args",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.
|
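Note: the long read.format().option()...load chain in the prefix above works because a MagicMock returns the same child mock for a call regardless of its arguments, so pinning load.return_value in the setup chain also pins it for the differently-argumented call chain inside run(). A toy demonstration (using unittest.mock in place of the mock package; the sentinel string is arbitrary):

from unittest import mock

spark = mock.MagicMock()
# Setup: same number of .option() calls as the code under test will make.
spark.read.format().option().option().load.return_value = "sentinel-df"
# Exercise: the arguments differ, but the chain resolves to the same mocks.
df = spark.read.format("jdbc").option("url", "u").option("driver", "d").load()
assert df == "sentinel-df"
print(df)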
233 | 13 | 3,401 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | FORMAT_JDBC | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.
|
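Note: the truncated assertion in the prefix above checks which format string the template passed to spark.read; assert_called_with compares against the most recent call on the mock. A toy version of that assertion style:

from unittest import mock

m = mock.MagicMock()
m.read.format("jdbc")
m.read.format.assert_called_with("jdbc")      # passes silently
try:
    m.read.format.assert_called_with("csv")   # raises AssertionError
except AssertionError as err:
    print("mismatch detected:", err)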
234 | 13 | 3,491 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_URL | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_URL",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.
| 235 | 13 | 3,594 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_DRIVER | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"JDBC_DRIVER",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.
| 236 | 13 | 3,712 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_TABLE | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_DRIVER",
"JDBC_TABLE",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.
| 237 | 13 | 3,838 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_PARTITIONCOLUMN | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.
| 238 | 13 | 3,983 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_LOWERBOUND | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.
| 239 | 13 | 4,127 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_UPPERBOUND | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.
|
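The chained assertions in the prefix above rely on a property of unittest.mock worth spelling out: every call on a MagicMock returns the same child mock regardless of arguments, so read.format().option().option()... can be re-walked after the fact to reach the mock recorded at each depth. A minimal sketch, assuming only the standard library (spark is a stand-in mock, not a real SparkSession):

    from unittest.mock import MagicMock

    spark = MagicMock()

    # The code under test configures the reader through a call chain:
    spark.read.format("jdbc").option("url", "url").option("driver", "driver")

    # format() always returns the same child mock, so the chain can be
    # re-walked to assert the arguments recorded at each depth:
    spark.read.format.assert_called_with("jdbc")
    spark.read.format().option.assert_called_with("url", "url")
    spark.read.format().option().option.assert_called_with("driver", "driver")

assert_called_with checks only the most recent call on each mock, which is why the tests assert each shallower call before the deeper walks re-invoke it with no arguments.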
240 | 13 | 4280 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | JDBC_NUMPARTITIONS | true | statement | 86 | 86 | false | true |
[
"FORMAT_JDBC",
"JDBC_URL",
"JDBC_TABLE",
"JDBC_DRIVER",
"JDBC_LOWERBOUND",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_NUMPARTITIONS",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.
|
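A row such as the one above pairs a ground_truth token with the completion lists proposed by both engines; the in_completions flag records whether the token appears in them. A minimal sketch of that consistency check, with a hypothetical helper name and a trimmed example row (neither belongs to the dataset itself):

    def ground_truth_in_completions(row: dict) -> bool:
        # True when the ground-truth token appears in either engine's list.
        intellij = set(row["intellij_completions"])
        jedi = {c["name"] for c in row["jedi_completions"]}
        return row["ground_truth"] in intellij | jedi

    row = {
        "ground_truth": "JDBC_NUMPARTITIONS",
        "intellij_completions": ["FORMAT_JDBC", "JDBC_NUMPARTITIONS"],
        "jedi_completions": [{"name": "JDBC_NUMPARTITIONS", "type": "statement"}],
    }
    assert ground_truth_in_completions(row)  # consistent with in_completions = true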
241 | 13 | 4508 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | OUTPUT_MODE_OVERWRITE | true | statement | 86 | 86 | false | true |
[
"JDBC_URL",
"JDBC_TABLE",
"FORMAT_JDBC",
"JDBC_DRIVER",
"JDBC_NUMPARTITIONS",
"BQ_GCS_INPUT_TABLE",
"BQ_GCS_OUTPUT_FORMAT",
"BQ_GCS_OUTPUT_LOCATION",
"BQ_GCS_OUTPUT_MODE",
"CSV_HEADER",
"CSV_INFER_SCHEMA",
"FORMAT_AVRO",
"FORMAT_AVRO_EXTD",
"FORMAT_BIGQUERY",
"FORMAT_CSV",
"FORMAT_HBASE",
"FORMAT_JSON",
"FORMAT_PRQT",
"GCS_BQ_INPUT_FORMAT",
"GCS_BQ_INPUT_LOCATION",
"GCS_BQ_LD_TEMP_BUCKET_NAME",
"GCS_BQ_OUTPUT_DATASET",
"GCS_BQ_OUTPUT_MODE",
"GCS_BQ_OUTPUT_TABLE",
"GCS_BQ_TEMP_BUCKET",
"GCS_BT_HBASE_CATALOG_JSON",
"GCS_BT_INPUT_FORMAT",
"GCS_BT_INPUT_LOCATION",
"GCS_JDBC_BATCH_SIZE",
"GCS_JDBC_INPUT_FORMAT",
"GCS_JDBC_INPUT_LOCATION",
"GCS_JDBC_OUTPUT_DRIVER",
"GCS_JDBC_OUTPUT_MODE",
"GCS_JDBC_OUTPUT_TABLE",
"GCS_JDBC_OUTPUT_URL",
"HBASE_GCS_CATALOG_JSON",
"HBASE_GCS_OUTPUT_FORMAT",
"HBASE_GCS_OUTPUT_LOCATION",
"HBASE_GCS_OUTPUT_MODE",
"HIVE_BQ_INPUT_DATABASE",
"HIVE_BQ_INPUT_TABLE",
"HIVE_BQ_LD_TEMP_BUCKET_NAME",
"HIVE_BQ_OUTPUT_DATASET",
"HIVE_BQ_OUTPUT_MODE",
"HIVE_BQ_OUTPUT_TABLE",
"HIVE_GCS_INPUT_DATABASE",
"HIVE_GCS_INPUT_TABLE",
"HIVE_GCS_OUTPUT_FORMAT",
"HIVE_GCS_OUTPUT_LOCATION",
"HIVE_GCS_OUTPUT_MODE",
"JDBC_BATCH_SIZE",
"JDBC_CREATE_TABLE_OPTIONS",
"JDBC_LOWERBOUND",
"JDBC_PARTITIONCOLUMN",
"JDBC_UPPERBOUND",
"JDBCTOGCS_INPUT_DRIVER",
"JDBCTOGCS_INPUT_LOWERBOUND",
"JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"JDBCTOGCS_INPUT_TABLE",
"JDBCTOGCS_INPUT_UPPERBOUND",
"JDBCTOGCS_INPUT_URL",
"JDBCTOGCS_NUMPARTITIONS",
"JDBCTOGCS_OUTPUT_FORMAT",
"JDBCTOGCS_OUTPUT_LOCATION",
"JDBCTOGCS_OUTPUT_MODE",
"JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_DRIVER",
"JDBCTOJDBC_INPUT_LOWERBOUND",
"JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"JDBCTOJDBC_INPUT_TABLE",
"JDBCTOJDBC_INPUT_UPPERBOUND",
"JDBCTOJDBC_INPUT_URL",
"JDBCTOJDBC_NUMPARTITIONS",
"JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"JDBCTOJDBC_OUTPUT_DRIVER",
"JDBCTOJDBC_OUTPUT_MODE",
"JDBCTOJDBC_OUTPUT_TABLE",
"JDBCTOJDBC_OUTPUT_URL",
"OUTPUT_MODE_APPEND",
"OUTPUT_MODE_ERRORIFEXISTS",
"OUTPUT_MODE_IGNORE",
"OUTPUT_MODE_OVERWRITE",
"PROJECT_ID_PROP",
"TABLE",
"TEMP_GCS_BUCKET"
] |
[
{
"name": "BQ_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "BQ_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "CSV_HEADER",
"type": "statement"
},
{
"name": "CSV_INFER_SCHEMA",
"type": "statement"
},
{
"name": "FORMAT_AVRO",
"type": "statement"
},
{
"name": "FORMAT_AVRO_EXTD",
"type": "statement"
},
{
"name": "FORMAT_BIGQUERY",
"type": "statement"
},
{
"name": "FORMAT_CSV",
"type": "statement"
},
{
"name": "FORMAT_HBASE",
"type": "statement"
},
{
"name": "FORMAT_JDBC",
"type": "statement"
},
{
"name": "FORMAT_JSON",
"type": "statement"
},
{
"name": "FORMAT_PRQT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BQ_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_BQ_TEMP_BUCKET",
"type": "statement"
},
{
"name": "GCS_BT_HBASE_CATALOG_JSON",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_BT_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_FORMAT",
"type": "statement"
},
{
"name": "GCS_JDBC_INPUT_LOCATION",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "GCS_JDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "HBASE_GCS_CATALOG_JSON",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HBASE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_BQ_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_BQ_LD_TEMP_BUCKET_NAME",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_DATASET",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_MODE",
"type": "statement"
},
{
"name": "HIVE_BQ_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_DATABASE",
"type": "statement"
},
{
"name": "HIVE_GCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "HIVE_GCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBC_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBC_CREATE_TABLE_OPTIONS",
"type": "statement"
},
{
"name": "JDBC_DRIVER",
"type": "statement"
},
{
"name": "JDBC_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBC_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBC_TABLE",
"type": "statement"
},
{
"name": "JDBC_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBC_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOGCS_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOGCS_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_FORMAT",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_LOCATION",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOGCS_OUTPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_LOWERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_PARTITIONCOLUMN",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_UPPERBOUND",
"type": "statement"
},
{
"name": "JDBCTOJDBC_INPUT_URL",
"type": "statement"
},
{
"name": "JDBCTOJDBC_NUMPARTITIONS",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_BATCH_SIZE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_CREATE_TABLE_OPTION",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_DRIVER",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_MODE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_TABLE",
"type": "statement"
},
{
"name": "JDBCTOJDBC_OUTPUT_URL",
"type": "statement"
},
{
"name": "OUTPUT_MODE_APPEND",
"type": "statement"
},
{
"name": "OUTPUT_MODE_ERRORIFEXISTS",
"type": "statement"
},
{
"name": "OUTPUT_MODE_IGNORE",
"type": "statement"
},
{
"name": "OUTPUT_MODE_OVERWRITE",
"type": "statement"
},
{
"name": "PROJECT_ID_PROP",
"type": "statement"
},
{
"name": "TABLE",
"type": "statement"
},
{
"name": "TEMP_GCS_BUCKET",
"type": "statement"
},
{
"name": "__doc__",
"type": "instance"
},
{
"name": "__file__",
"type": "instance"
},
{
"name": "__name__",
"type": "instance"
},
{
"name": "__package__",
"type": "instance"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.
|
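Each test method in these prefixes is decorated with @mock.patch.object(pyspark.sql, 'SparkSession'), which replaces the named attribute with a MagicMock for the duration of the test and passes that mock in as mock_spark_session. A minimal sketch of the mechanism, using a stand-in class instead of the real pyspark module:

    from unittest import mock

    class FakeSqlModule:              # stand-in for pyspark.sql, an assumption
        SparkSession = object()

    @mock.patch.object(FakeSqlModule, "SparkSession")
    def check(mock_spark_session):
        # While patched, the attribute is the injected MagicMock.
        assert FakeSqlModule.SparkSession is mock_spark_session

    check()
    # Once the decorated function returns, the original attribute is restored.
    assert not isinstance(FakeSqlModule.SparkSession, mock.MagicMock)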
242 | 13 | 4902 | googlecloudplatform__dataproc-templates | bba5da698a8aa144c73d4d2a90e84c6a577ce7f4 | python/test/jdbc/test_jdbc_to_gcs.py | Unknown | parse_args | true | function | 4 | 4 | false | false |
[
"parse_args",
"run",
"build",
"get_logger",
"__annotations__",
"__class__",
"__delattr__",
"__dict__",
"__dir__",
"__eq__",
"__format__",
"__getattribute__",
"__hash__",
"__init__",
"__init_subclass__",
"__ne__",
"__new__",
"__reduce__",
"__reduce_ex__",
"__repr__",
"__setattr__",
"__sizeof__",
"__slots__",
"__str__",
"__subclasshook__",
"__doc__",
"__module__"
] |
[
{
"name": "build",
"type": "function"
},
{
"name": "get_logger",
"type": "function"
},
{
"name": "parse_args",
"type": "function"
},
{
"name": "run",
"type": "function"
},
{
"name": "__annotations__",
"type": "statement"
},
{
"name": "__class__",
"type": "property"
},
{
"name": "__delattr__",
"type": "function"
},
{
"name": "__dict__",
"type": "statement"
},
{
"name": "__dir__",
"type": "function"
},
{
"name": "__doc__",
"type": "statement"
},
{
"name": "__eq__",
"type": "function"
},
{
"name": "__format__",
"type": "function"
},
{
"name": "__getattribute__",
"type": "function"
},
{
"name": "__hash__",
"type": "function"
},
{
"name": "__init__",
"type": "function"
},
{
"name": "__init_subclass__",
"type": "function"
},
{
"name": "__module__",
"type": "statement"
},
{
"name": "__ne__",
"type": "function"
},
{
"name": "__new__",
"type": "function"
},
{
"name": "__reduce__",
"type": "function"
},
{
"name": "__reduce_ex__",
"type": "function"
},
{
"name": "__repr__",
"type": "function"
},
{
"name": "__setattr__",
"type": "function"
},
{
"name": "__sizeof__",
"type": "function"
},
{
"name": "__slots__",
"type": "statement"
},
{
"name": "__str__",
"type": "function"
}
] |
"""
* Copyright 2022 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import mock
import pyspark
from dataproc_templates.jdbc.jdbc_to_gcs import JDBCToGCSTemplate
import dataproc_templates.util.template_constants as constants
class TestJDBCToGCSTemplate:
"""
Test suite for JDBCToGCSTemplate
"""
def test_parse_args1(self):
"""Tests JDBCToGCSTemplate.parse_args()"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=csv",
"--jdbctogcs.output.mode=append",
"--jdbctogcs.output.partitioncolumn=column"
])
assert parsed_args["jdbctogcs.input.url"] == "url"
assert parsed_args["jdbctogcs.input.driver"] == "driver"
assert parsed_args["jdbctogcs.input.table"] == "table1"
assert parsed_args["jdbctogcs.input.partitioncolumn"] == "column"
assert parsed_args["jdbctogcs.input.lowerbound"] == "1"
assert parsed_args["jdbctogcs.input.upperbound"] == "2"
assert parsed_args["jdbctogcs.numpartitions"] == "5"
assert parsed_args["jdbctogcs.output.location"] == "gs://test"
assert parsed_args["jdbctogcs.output.format"] == "csv"
assert parsed_args["jdbctogcs.output.mode"] == "append"
assert parsed_args["jdbctogcs.output.partitioncolumn"] == "column"
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args2(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write parquet"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.parse_args(
["--jdbctogcs.input.url=url",
"--jdbctogcs.input.driver=driver",
"--jdbctogcs.input.table=table1",
"--jdbctogcs.input.partitioncolumn=column",
"--jdbctogcs.input.lowerbound=1",
"--jdbctogcs.input.upperbound=2",
"--jdbctogcs.numpartitions=5",
"--jdbctogcs.output.location=gs://test",
"--jdbctogcs.output.format=parquet",
"--jdbctogcs.output.mode=overwrite"
])
mock_spark_session.read.format().option().option().option().option().option().option().option().load.return_value = mock_spark_session.dataframe.DataFrame
jdbc_to_gcs_template.run(mock_spark_session, mock_parsed_args)
mock_spark_session.read.format.assert_called_with(constants.FORMAT_JDBC)
mock_spark_session.read.format().option.assert_called_with(constants.JDBC_URL, "url")
mock_spark_session.read.format().option().option.assert_called_with(constants.JDBC_DRIVER, "driver")
mock_spark_session.read.format().option().option().option.assert_called_with(constants.JDBC_TABLE, "table1")
mock_spark_session.read.format().option().option().option().option.assert_called_with(constants.JDBC_PARTITIONCOLUMN, "column")
mock_spark_session.read.format().option().option().option().option().option.assert_called_with(constants.JDBC_LOWERBOUND, "1")
mock_spark_session.read.format().option().option().option().option().option().option.assert_called_with(constants.JDBC_UPPERBOUND, "2")
mock_spark_session.read.format().option().option().option().option().option().option().option.assert_called_with(constants.JDBC_NUMPARTITIONS, "5")
mock_spark_session.read.format().option().option().option().option().option().option().option().load()
mock_spark_session.dataframe.DataFrame.write.mode.assert_called_once_with(constants.OUTPUT_MODE_OVERWRITE)
mock_spark_session.dataframe.DataFrame.write.mode().parquet.assert_called_once_with("gs://test")
@mock.patch.object(pyspark.sql, 'SparkSession')
def test_run_pass_args3(self, mock_spark_session):
"""Tests JDBCToGCSTemplate write avro"""
jdbc_to_gcs_template = JDBCToGCSTemplate()
mock_parsed_args = jdbc_to_gcs_template.
|
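The final row's ground truth is parse_args, the method every test calls with --jdbctogcs.* strings and then indexes as a flat dict keyed by the dotted option names. The real JDBCToGCSTemplate implementation is not reproduced in this dump, so the following is only a sketch of how such arguments could be parsed; the option names come from the test invocations, everything else is an assumption:

    import argparse
    from typing import Dict, List

    def parse_args(args: List[str]) -> Dict[str, str]:
        parser = argparse.ArgumentParser()
        for name in ("jdbctogcs.input.url", "jdbctogcs.output.mode"):
            # An explicit dest keeps the dotted key the tests index with.
            parser.add_argument(f"--{name}", dest=name)
        known, _ = parser.parse_known_args(args)
        return vars(known)

    parsed = parse_args(["--jdbctogcs.input.url=url",
                         "--jdbctogcs.output.mode=append"])
    assert parsed["jdbctogcs.input.url"] == "url"
    assert parsed["jdbctogcs.output.mode"] == "append"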