idx
int64
0
7.85k
idx_lca
int64
0
223
offset
int64
162
55k
repo
stringclasses
62 values
commit_hash
stringclasses
113 values
target_file
stringclasses
134 values
line_type_lca
stringclasses
7 values
ground_truth
stringlengths
1
46
in_completions
bool
1 class
completion_type
stringclasses
6 values
non_dunder_count_intellij
int64
0
529
non_dunder_count_jedi
int64
0
128
start_with_
bool
2 classes
first_occurrence
bool
2 classes
intellij_completions
listlengths
1
532
jedi_completions
listlengths
3
148
prefix
stringlengths
162
55k
561
19
1,735
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.
562
19
1,740
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.
564
19
1,970
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
SOMA
true
class
34
35
false
false
[ "SOMA", "soma", "util", "None", "util_ann", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.
566
19
2,371
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "soma", "util", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.
567
19
2,376
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
get_start_stamp
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.
569
19
2,547
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util_ann
true
module
33
35
false
true
[ "soma", "util", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.
570
19
2,556
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
_decategoricalize
true
function
8
14
true
true
[ "describe_ann_file", "_describe_ann_file_show_data", "_describe_ann_file_show_summary", "_describe_ann_file_show_types", "_decategoricalize", "_describe_ann_file_show_uns_data", "_describe_ann_file_show_uns_summary", "_describe_ann_file_show_uns_types" ]
[ { "name": "ad", "type": "module" }, { "name": "describe_ann_file", "type": "function" }, { "name": "np", "type": "module" }, { "name": "os", "type": "module" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "util", "type": "module" }, { "name": "_decategoricalize", "type": "function" }, { "name": "_describe_ann_file_show_data", "type": "function" }, { "name": "_describe_ann_file_show_summary", "type": "function" }, { "name": "_describe_ann_file_show_types", "type": "function" }, { "name": "_describe_ann_file_show_uns_data", "type": "function" }, { "name": "_describe_ann_file_show_uns_summary", "type": "function" }, { "name": "_describe_ann_file_show_uns_types", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann.
572
19
2,641
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util
true
module
33
35
false
false
[ "soma", "util", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.
573
19
2,646
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.
576
19
2,762
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util
true
module
33
35
false
false
[ "soma", "util", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.
577
19
2,767
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
get_start_stamp
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.
582
19
3,002
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
common
from_matrix_and_dim_values
true
function
5
18
false
true
[ "data", "uri", "name", "exists", "from_matrix_and_dim_values", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "data", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrix_and_dim_values", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.
586
19
3,191
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
from_dataframe
true
function
18
24
false
true
[ "to_dataframe", "uri", "from_dataframe", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.
590
19
3,286
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
from_dataframe
true
function
18
24
false
false
[ "to_dataframe", "uri", "from_dataframe", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.
594
19
3,453
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
from_matrices_and_dim_values
true
function
7
20
false
true
[ "to_dict_of_csr", "uri", "from_matrices_and_dim_values", "name", "keys", "dim_name", "exists", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.
598
19
3,562
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
from_matrices_and_dim_values
true
function
7
20
false
false
[ "to_dict_of_csr", "uri", "from_matrices_and_dim_values", "name", "keys", "dim_name", "exists", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.
602
19
3,671
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
from_matrices_and_dim_values
true
function
8
21
false
false
[ "uri", "from_matrices_and_dim_values", "to_dict_of_csr", "name", "keys", "col_dim_name", "exists", "row_dim_name", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "col_dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "row_dim_name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.
606
19
3,780
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
from_matrices_and_dim_values
true
function
8
21
false
false
[ "uri", "from_matrices_and_dim_values", "to_dict_of_csr", "name", "keys", "col_dim_name", "exists", "row_dim_name", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "col_dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "row_dim_name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.
610
19
3,990
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
infile
from_anndata
true
function
9
22
false
true
[ "to_anndata_raw", "var", "X", "from_anndata", "uri", "exists", "name", "varm", "varp", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_anndata_raw", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "var", "type": "statement" }, { "name": "varm", "type": "statement" }, { "name": "varp", "type": "statement" }, { "name": "X", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.
614
19
4,163
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
from_anndata_uns
true
function
6
19
false
true
[ "to_dict_of_matrices", "uri", "from_anndata_uns", "name", "keys", "exists", "__contains__", "__getitem__", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata_uns", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_matrices", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.
618
19
4,287
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util
true
module
33
35
false
false
[ "soma", "util", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.
619
19
4,292
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.
622
19
4,460
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
SOMA
true
class
34
35
false
false
[ "SOMA", "util", "soma", "None", "util_ann", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.
624
19
4,694
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "SOMAOptions", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.
625
19
4,699
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
get_start_stamp
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.
628
19
4,858
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "SOMAOptions", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.
629
19
4,863
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
get_start_stamp
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.
632
19
5,030
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.
633
19
5,035
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.
636
19
5,166
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.
637
19
5,171
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.
639
19
5,378
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
SOMA
true
class
34
35
false
false
[ "SOMA", "util", "soma", "None", "util_ann", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.
641
19
5,794
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "raw_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.
642
19
5,799
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
get_start_stamp
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.
645
19
5,892
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dataframe
true
function
18
24
false
true
[ "to_dataframe", "uri", "from_dataframe", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.
647
19
5,929
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dataframe
true
function
18
24
false
false
[ "to_dataframe", "from_dataframe", "uri", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.
649
19
5,964
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
data
true
statement
5
18
false
true
[ "uri", "data", "from_matrix_and_dim_values", "name", "exists", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "data", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrix_and_dim_values", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.
650
19
5,969
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_csr_matrix
true
function
20
28
false
true
[ "to_csr_matrix", "uri", "from_matrix_and_dim_values", "df", "name", "attr_name", "attr_names", "attr_names_to_types", "col_dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ingest_data_cols_chunked", "ingest_data_dense_rows_chunked", "ingest_data_rows_chunked", "ingest_data_whole", "row_dim_name", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_name", "type": "statement" }, { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "col_dim_name", "type": "statement" }, { "name": "df", "type": "function" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_matrix_and_dim_values", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ingest_data_cols_chunked", "type": "function" }, { "name": "ingest_data_dense_rows_chunked", "type": "function" }, { "name": "ingest_data_rows_chunked", "type": "function" }, { "name": "ingest_data_whole", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "row_dim_name", "type": "statement" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_csr_matrix", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_create_empty_array", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_ingest_data", "type": "function" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.
652
19
6,033
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dict_of_csr
true
function
7
20
false
true
[ "from_matrices_and_dim_values", "uri", "to_dict_of_csr", "name", "keys", "dim_name", "exists", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.
654
19
6,071
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dict_of_csr
true
function
7
20
false
false
[ "from_matrices_and_dim_values", "uri", "to_dict_of_csr", "name", "keys", "dim_name", "exists", "__contains__", "__getitem__", "__init__", "__iter__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "dim_name", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrices_and_dim_values", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_csr", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__iter__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.
656
19
6,314
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_anndata_raw
true
function
9
22
false
true
[ "var", "X", "from_anndata", "varm", "uri", "exists", "name", "to_anndata_raw", "varp", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_anndata_raw", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "var", "type": "statement" }, { "name": "varm", "type": "statement" }, { "name": "varp", "type": "statement" }, { "name": "X", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.
658
19
6,550
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dict_of_matrices
true
function
6
19
false
true
[ "from_anndata_uns", "uri", "name", "keys", "to_dict_of_matrices", "exists", "__contains__", "__getitem__", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata_uns", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_dict_of_matrices", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__contains__", "type": "function" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__getitem__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.
660
19
6,953
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
util
true
module
33
35
false
false
[ "util", "soma", "SOMA", "util_ann", "raw_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.
661
19
6,958
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
format_elapsed
true
function
10
16
false
false
[ "format_elapsed", "get_start_stamp", "List", "Optional", "ETATracker", "_find_csc_chunk_size", "_find_csr_chunk_size", "_get_sort_and_permutation", "_to_tiledb_supported_array_type", "_X_and_ids_to_coo" ]
[ { "name": "ad", "type": "module" }, { "name": "ETATracker", "type": "class" }, { "name": "format_elapsed", "type": "function" }, { "name": "get_start_stamp", "type": "function" }, { "name": "List", "type": "class" }, { "name": "numpy", "type": "module" }, { "name": "Optional", "type": "class" }, { "name": "pd", "type": "module" }, { "name": "scipy", "type": "module" }, { "name": "tiledb", "type": "module" }, { "name": "time", "type": "module" }, { "name": "_find_csc_chunk_size", "type": "function" }, { "name": "_find_csr_chunk_size", "type": "function" }, { "name": "_get_sort_and_permutation", "type": "function" }, { "name": "_to_tiledb_supported_array_type", "type": "function" }, { "name": "_X_and_ids_to_coo", "type": "function" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.
663
19
7,143
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
SOMA
true
class
34
35
false
false
[ "SOMA", "util", "soma", "None", "util_ann", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "soma_options", "SOMACollection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.
665
19
7,259
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
Unknown
to_dataframe
true
function
18
24
false
false
[ "from_dataframe", "to_dataframe", "uri", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.
667
19
7,296
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
var
true
statement
9
22
false
true
[ "var", "to_anndata_raw", "X", "uri", "varm", "exists", "from_anndata", "name", "varp", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_anndata_raw", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "var", "type": "statement" }, { "name": "varm", "type": "statement" }, { "name": "varp", "type": "statement" }, { "name": "X", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.to_dataframe() var_df = soma.raw.
668
19
7,300
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_dataframe
true
function
18
24
false
false
[ "from_dataframe", "to_dataframe", "uri", "df", "name", "attr_names", "attr_names_to_types", "attribute_filter", "dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ids", "keys", "shape", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "attribute_filter", "type": "function" }, { "name": "df", "type": "function" }, { "name": "dim_name", "type": "statement" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_dataframe", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ids", "type": "function" }, { "name": "keys", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "shape", "type": "function" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_dataframe", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.to_dataframe() var_df = soma.raw.var.
670
19
7,336
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
X
true
statement
9
22
false
true
[ "to_anndata_raw", "var", "X", "uri", "varm", "exists", "from_anndata", "name", "varp", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "exists", "type": "function" }, { "name": "from_anndata", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "to_anndata_raw", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "var", "type": "statement" }, { "name": "varm", "type": "statement" }, { "name": "varp", "type": "statement" }, { "name": "X", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.to_dataframe() var_df = soma.raw.var.to_dataframe() X_mat = soma.raw.
671
19
7,338
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
data
true
statement
5
18
false
false
[ "uri", "data", "from_matrix_and_dim_values", "name", "exists", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "data", "type": "statement" }, { "name": "exists", "type": "function" }, { "name": "from_matrix_and_dim_values", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "uri", "type": "statement" }, { "name": "_add_object", "type": "function" }, { "name": "_create", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_get_member_names", "type": "function" }, { "name": "_get_member_names_to_uris", "type": "function" }, { "name": "_get_member_uris", "type": "function" }, { "name": "_indent", "type": "statement" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_open_withlessly", "type": "function" }, { "name": "_remove_object", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.to_dataframe() var_df = soma.raw.var.to_dataframe() X_mat = soma.raw.X.
672
19
7,343
single-cell-data__tiledb-soma
e64f89e6251f221bd91c49a997d7fb67de3ed349
apis/python/src/tiledbsc/io/anndata.py
inproject
to_csr_matrix
true
function
20
28
false
false
[ "to_csr_matrix", "uri", "from_matrix_and_dim_values", "df", "name", "attr_name", "attr_names", "attr_names_to_types", "col_dim_name", "dim_names", "dim_names_to_types", "dim_select", "exists", "has_attr_name", "ingest_data_cols_chunked", "ingest_data_dense_rows_chunked", "ingest_data_rows_chunked", "ingest_data_whole", "row_dim_name", "tiledb_array_schema", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "attr_name", "type": "statement" }, { "name": "attr_names", "type": "function" }, { "name": "attr_names_to_types", "type": "function" }, { "name": "col_dim_name", "type": "statement" }, { "name": "df", "type": "function" }, { "name": "dim_names", "type": "function" }, { "name": "dim_names_to_types", "type": "function" }, { "name": "dim_select", "type": "function" }, { "name": "exists", "type": "function" }, { "name": "from_matrix_and_dim_values", "type": "function" }, { "name": "has_attr_name", "type": "function" }, { "name": "ingest_data_cols_chunked", "type": "function" }, { "name": "ingest_data_dense_rows_chunked", "type": "function" }, { "name": "ingest_data_rows_chunked", "type": "function" }, { "name": "ingest_data_whole", "type": "function" }, { "name": "name", "type": "statement" }, { "name": "row_dim_name", "type": "statement" }, { "name": "tiledb_array_schema", "type": "function" }, { "name": "to_csr_matrix", "type": "function" }, { "name": "uri", "type": "statement" }, { "name": "_create_empty_array", "type": "function" }, { "name": "_ctx", "type": "statement" }, { "name": "_indent", "type": "statement" }, { "name": "_ingest_data", "type": "function" }, { "name": "_object_type", "type": "function" }, { "name": "_open", "type": "function" }, { "name": "_soma_options", "type": "statement" }, { "name": "_verbose", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import tiledbsc import tiledbsc.util import tiledbsc.util_ann import anndata as ad # ---------------------------------------------------------------- def from_h5ad(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads an .h5ad file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_h5ad {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = ad.read_h5ad(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_h5ad {input_path} -> {soma.uri}" ) ) # ---------------------------------------------------------------- def from_10x(soma: tiledbsc.SOMA, input_path: str) -> None: """ Reads a 10X file and writes to a TileDB group structure. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.from_10x {input_path} -> {soma.uri}") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START READING {input_path}") anndata = scanpy.read_10x_h5(input_path) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"{soma._indent}FINISH READING {input_path}" ) ) from_anndata(soma, anndata) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.from_10x {input_path} -> {soma.uri}" ) ) return anndata # ---------------------------------------------------------------- def from_anndata(soma: tiledbsc.SOMA, anndata: ad.AnnData) -> None: """ Top-level writer method for creating a TileDB group for a SOMA object. """ # Without _at least_ an index, there is nothing to indicate the dimension indices. if anndata.obs.index.empty or anndata.var.index.empty: raise NotImplementedError("Empty AnnData.obs or AnnData.var unsupported.") if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START DECATEGORICALIZING") anndata.obs_names_make_unique() anndata.var_names_make_unique() anndata = tiledbsc.util_ann._decategoricalize(anndata) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH DECATEGORICALIZING") ) if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START WRITING {soma.uri}") # Must be done first, to create the parent directory soma._create() # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.X.from_matrix_and_dim_values(anndata.X, anndata.obs.index, anndata.var.index) soma._add_object(soma.X) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obs.from_dataframe(dataframe=anndata.obs, extent=256) soma._add_object(soma.obs) soma.var.from_dataframe(dataframe=anndata.var, extent=2048) soma._add_object(soma.var) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - soma.obsm.from_matrices_and_dim_values(anndata.obsm, anndata.obs_names) soma._add_object(soma.obsm) soma.varm.from_matrices_and_dim_values(anndata.varm, anndata.var_names) soma._add_object(soma.varm) soma.obsp.from_matrices_and_dim_values(anndata.obsp, anndata.obs_names) soma._add_object(soma.obsp) soma.varp.from_matrices_and_dim_values(anndata.varp, anndata.var_names) soma._add_object(soma.varp) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.raw != None: soma.raw.from_anndata(anndata) soma._add_object(soma.raw) # - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - if anndata.uns != None: soma.uns.from_anndata_uns(anndata.uns) soma._add_object(soma.uns) if soma._verbose: print( tiledbsc.util.format_elapsed(s, f"{soma._indent}FINISH WRITING {soma.uri}") ) # ---------------------------------------------------------------- def to_h5ad(soma: tiledbsc.SOMA, h5ad_path: str) -> None: """ Converts the soma group to anndata format and writes it to the specified .h5ad file. As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_h5ad {soma.uri} -> {h5ad_path}") anndata = to_anndata(soma) if soma._verbose: s2 = tiledbsc.util.get_start_stamp() print(f"{soma._indent}START write {h5ad_path}") anndata.write_h5ad(h5ad_path) if soma._verbose: print( tiledbsc.util.format_elapsed(s2, f"{soma._indent}FINISH write {h5ad_path}") ) if soma._verbose: print( tiledbsc.util.format_elapsed( s, f"FINISH SOMA.to_h5ad {soma.uri} -> {h5ad_path}" ) ) # ---------------------------------------------------------------- def to_anndata(soma: tiledbsc.SOMA) -> ad.AnnData: """ Converts the soma group to anndata. Choice of matrix formats is following what we often see in input .h5ad files: * X as scipy.sparse.csr_matrix * obs,var as pandas.dataframe * obsm,varm arrays as numpy.ndarray * obsp,varp arrays as scipy.sparse.csr_matrix As of 2022-05-05 this is an incomplete prototype. """ if soma._verbose: s = tiledbsc.util.get_start_stamp() print(f"START SOMA.to_anndata {soma.uri}") obs_df = soma.obs.to_dataframe() var_df = soma.var.to_dataframe() X_mat = soma.X.data.to_csr_matrix(obs_df.index, var_df.index) obsm = soma.obsm.to_dict_of_csr() varm = soma.varm.to_dict_of_csr() # TODO print(" OBSP OUTGEST NOT WORKING YET") # obsp = soma.obsp.to_dict_of_csr() print(" VARP OUTGEST NOT WORKING YET") # varp = soma.varp.to_dict_of_csr() (raw_X, raw_var_df, raw_varm) = soma.raw.to_anndata_raw(obs_df.index) anndata = ad.AnnData(X=X_mat, obs=obs_df, var=var_df, obsm=obsm, varm=varm) raw = ad.Raw( anndata, X=raw_X, var=raw_var_df, varm=raw_varm, ) uns = soma.uns.to_dict_of_matrices() anndata = ad.AnnData( X=anndata.X, dtype=None if anndata.X is None else anndata.X.dtype, # some datasets have no X obs=anndata.obs, var=anndata.var, obsm=anndata.obsm, obsp=anndata.obsp, varm=anndata.varm, varp=anndata.varp, raw=raw, uns=uns, ) if soma._verbose: print(tiledbsc.util.format_elapsed(s, f"FINISH SOMA.to_anndata {soma.uri}")) return anndata # ---------------------------------------------------------------- def to_anndata_from_raw(soma: tiledbsc.SOMA) -> ad.AnnData: """ Extract only the raw parts as a new AnnData object. """ obs_df = soma.obs.to_dataframe() var_df = soma.raw.var.to_dataframe() X_mat = soma.raw.X.data.
673
20
577
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
inproject
SOMACollection
true
class
32
35
false
true
[ "SOMACollection", "soma", "SOMA", "soma_options", "soma_collection", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.
674
20
2,901
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.
675
20
3,094
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.
676
20
3,341
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.
677
20
3,599
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "soma", "SOMA", "None", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.
678
20
3,941
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.
679
20
4,393
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.
680
20
4,856
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.var.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"var_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_obs_column_unique_values(soco: t.
681
20
5,161
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.var.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"var_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_obs_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.obs.keys(): print(soma.obs.df()[col_name].unique()) # ---------------------------------------------------------------- def show_var_column_unique_values(soco: t.
682
20
5,458
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.var.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"var_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_obs_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.obs.keys(): print(soma.obs.df()[col_name].unique()) # ---------------------------------------------------------------- def show_var_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.var.keys(): print(soma.var.df()[col_name].unique()) # ---------------------------------------------------------------- def show_obs_value_counts(soco: t.
683
20
6,404
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
Unknown
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "show_single_cell_group", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "soma_collection", "soma_options", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.var.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"var_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_obs_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.obs.keys(): print(soma.obs.df()[col_name].unique()) # ---------------------------------------------------------------- def show_var_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.var.keys(): print(soma.var.df()[col_name].unique()) # ---------------------------------------------------------------- def show_obs_value_counts(soco: t.SOMACollection, obs_labels: List[str]) -> None: for obs_label in obs_labels: counts = {} for soma in soco: print("...", soma.name) # print("\n".join(sorted(soma.obs.attr_names()))) obs = soma.obs.df() if not obs_label in obs: continue obs_label_values = sorted(list(set(obs[obs_label]))) for obs_label_value in obs_label_values: if obs_label_value in counts: counts[obs_label_value] += 1 else: counts[obs_label_value] = 1 print( "----------------------------------------------------------------", obs_label, ) for k, v in dict(sorted(counts.items(), key=lambda item: item[1])).items(): print(k, v) # ---------------------------------------------------------------- def show_var_value_counts(soco: t.
684
20
7,351
single-cell-data__tiledb-soma
a367a8ce8cdffbcd4c14677da1eeeb14debc1876
apis/python/examples/pre-query.py
inproject
SOMACollection
true
class
33
35
false
false
[ "SOMACollection", "None", "soma", "SOMA", "soma_options", "annotation_matrix", "annotation_matrix_group", "annotation_pairwise_matrix_group", "AnnotationMatrix", "AnnotationMatrixGroup", "AnnotationPairwiseMatrixGroup", "assay_matrix", "assay_matrix_group", "AssayMatrix", "AssayMatrixGroup", "describe_ann_file", "raw_group", "RawGroup", "show_single_cell_group", "soma_collection", "SOMAOptions", "tiledb_array", "tiledb_group", "tiledb_object", "TileDBArray", "TileDBGroup", "TileDBObject", "uns_array", "uns_group", "UnsArray", "UnsGroup", "util_ann", "util_tiledb" ]
[ { "name": "annotation_dataframe", "type": "module" }, { "name": "annotation_matrix", "type": "module" }, { "name": "annotation_matrix_group", "type": "module" }, { "name": "annotation_pairwise_matrix_group", "type": "module" }, { "name": "AnnotationMatrix", "type": "class" }, { "name": "AnnotationMatrixGroup", "type": "class" }, { "name": "AnnotationPairwiseMatrixGroup", "type": "class" }, { "name": "assay_matrix", "type": "module" }, { "name": "assay_matrix_group", "type": "module" }, { "name": "AssayMatrix", "type": "class" }, { "name": "AssayMatrixGroup", "type": "class" }, { "name": "describe_ann_file", "type": "function" }, { "name": "io", "type": "module" }, { "name": "raw_group", "type": "module" }, { "name": "RawGroup", "type": "class" }, { "name": "show_single_cell_group", "type": "function" }, { "name": "SOMA", "type": "class" }, { "name": "soma", "type": "module" }, { "name": "soma_collection", "type": "module" }, { "name": "soma_options", "type": "module" }, { "name": "SOMACollection", "type": "class" }, { "name": "SOMAOptions", "type": "class" }, { "name": "tiledb_array", "type": "module" }, { "name": "tiledb_group", "type": "module" }, { "name": "tiledb_object", "type": "module" }, { "name": "TileDBArray", "type": "class" }, { "name": "TileDBGroup", "type": "class" }, { "name": "TileDBObject", "type": "class" }, { "name": "uns_array", "type": "module" }, { "name": "uns_group", "type": "module" }, { "name": "UnsArray", "type": "class" }, { "name": "UnsGroup", "type": "class" }, { "name": "util", "type": "module" }, { "name": "util_ann", "type": "module" }, { "name": "util_tiledb", "type": "module" }, { "name": "__doc__", "type": "instance" }, { "name": "__file__", "type": "instance" }, { "name": "__name__", "type": "instance" }, { "name": "__package__", "type": "instance" } ]
#!/usr/bin/env python # Invoke this with, for example, # # peek-soco ./soma-collection # # -- then you can inspect the SOMACollection object import tiledb import tiledbsc as t import pandas import sys, os from typing import List, Dict # ================================================================ def main(): if len(sys.argv) == 1: soco_path = "soma-collection" elif len(sys.argv) == 2: soco_path = sys.argv[1] else: print(f"{sys.argv[0]}: need just one soma-collection path.", file=sys.stderr) sys.exit(1) soco = t.SOMACollection(soco_path) # print() # print("================================================================") # print("NAMES AND URIS") # print_names_and_uris(soco) # # print() # print("================================================================") # print("OBS NAMES") # show_obs_names(soco) # # print() # print("================================================================") # print("VAR NAMES") # show_var_names(soco) # # print() # print("================================================================") # print("SOMAS HAVING ALL THREE") # show_somas_with_all_three(soco) # # print() # print("================================================================") # print("OBS_ID COUNTS") # show_obs_id_counts(soco) # # print() # print("================================================================") # print("VAR_ID COUNTS") # show_var_id_counts(soco) # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR CELL_TYPE") # show_obs_column_unique_values(soco, "cell_type") # # print() # print("================================================================") # print("OBS UNIQUE VALUES FOR FEATURE_NAME") # show_var_column_unique_values(soco, "feature_name") # # print() # print("================================================================") # print("OBS VALUE COUNTS FOR CELL_TYPE AND TISSUE") # show_obs_value_counts(soco, ["cell_type", "tissue"]) # # print() # print("================================================================") # print("VAR VALUE COUNTS FOR CELL_TYPE AND FEATURE_NAME") # show_var_value_counts(soco, ["feature_name"]) print() print("================================================================") print("SHOW SOMAS HAVING") show_somas_having( soco, {"cell_type": ["B cell", "T cell"], "tissue": ["blood", "lung"]}, {"feature_name": ["MT-CO3"]}, ) # ---------------------------------------------------------------- def print_names_and_uris(soco: t.SOMACollection) -> None: for soma in soco: print("%-40s %s" % (soma.name, soma.uri)) # ---------------------------------------------------------------- def show_obs_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.obs.keys(): print(" obs", attr_name) # ---------------------------------------------------------------- def show_var_names(soco: t.SOMACollection) -> None: for soma in soco: print(soma.uri) for attr_name in soma.var.keys(): print(" var", attr_name) # ---------------------------------------------------------------- def show_somas_with_all_three(soco: t.SOMACollection) -> None: for soma in soco: if "cell_type" in soma.obs.attr_names(): if "tissue" in soma.obs.attr_names(): if "feature_name" in soma.var.attr_names(): print(soma.uri) # ---------------------------------------------------------------- def show_obs_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.obs.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"obs_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_var_id_counts(soco: t.SOMACollection) -> None: counts = {} for soma in soco: for oid in soma.var.ids(): if oid in counts: counts[oid] += 1 else: counts[oid] = 1 df = pandas.DataFrame.from_dict( {"var_id": counts.keys(), "counts": counts.values()} ) # print(df.head()) print(df) # ---------------------------------------------------------------- def show_obs_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.obs.keys(): print(soma.obs.df()[col_name].unique()) # ---------------------------------------------------------------- def show_var_column_unique_values(soco: t.SOMACollection, col_name: str) -> None: for soma in soco: print() print(soma.uri) if col_name in soma.var.keys(): print(soma.var.df()[col_name].unique()) # ---------------------------------------------------------------- def show_obs_value_counts(soco: t.SOMACollection, obs_labels: List[str]) -> None: for obs_label in obs_labels: counts = {} for soma in soco: print("...", soma.name) # print("\n".join(sorted(soma.obs.attr_names()))) obs = soma.obs.df() if not obs_label in obs: continue obs_label_values = sorted(list(set(obs[obs_label]))) for obs_label_value in obs_label_values: if obs_label_value in counts: counts[obs_label_value] += 1 else: counts[obs_label_value] = 1 print( "----------------------------------------------------------------", obs_label, ) for k, v in dict(sorted(counts.items(), key=lambda item: item[1])).items(): print(k, v) # ---------------------------------------------------------------- def show_var_value_counts(soco: t.SOMACollection, var_labels: List[str]) -> None: for var_label in var_labels: counts = {} for soma in soco: print("...", soma.name) # print("\n".join(sorted(soma.var.attr_names()))) var = soma.var.df() if not var_label in var: continue var_label_values = sorted(list(set(var[var_label]))) for var_label_value in var_label_values: if var_label_value in counts: counts[var_label_value] += 1 else: counts[var_label_value] = 1 print( "----------------------------------------------------------------", var_label, ) for k, v in dict(sorted(counts.items(), key=lambda item: item[1])).items(): print(k, v) # ---------------------------------------------------------------- def show_somas_having( soco: t.
685
25
1,416
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
Unknown
auth
true
function
11
11
false
true
[ "auth", "support", "hubs", "spec", "deploy_support", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.
686
25
2,299
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
Unknown
support
true
statement
11
11
false
true
[ "support", "deploy_support", "auth", "hubs", "spec", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.
687
25
2,329
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
inproject
auth
true
function
11
11
false
false
[ "auth", "support", "deploy_support", "hubs", "spec", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.
688
25
2,357
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
infile
deploy_support
true
function
11
11
false
true
[ "support", "auth", "hubs", "spec", "deploy_support", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.auth(): cluster.
689
25
3,037
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
Unknown
support
true
statement
11
11
false
false
[ "auth", "hubs", "support", "deploy_support", "spec", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.auth(): cluster.deploy_support() def deploy_grafana_dashboards(cluster_name): """ Deploy grafana dashboards to a cluster that provide useful metrics for operating a JupyterHub Grafana dashboards and deployment mechanism in question are maintained in this repo: https://github.com/jupyterhub/grafana-dashboards """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # If grafana support chart is not deployed, then there's nothing to do if not cluster.
690
25
3,956
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
common
support
true
statement
11
11
false
false
[ "deploy_support", "support", "hubs", "auth", "config_path", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "ensure_docker_credhelpers", "spec", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.auth(): cluster.deploy_support() def deploy_grafana_dashboards(cluster_name): """ Deploy grafana dashboards to a cluster that provide useful metrics for operating a JupyterHub Grafana dashboards and deployment mechanism in question are maintained in this repo: https://github.com/jupyterhub/grafana-dashboards """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # If grafana support chart is not deployed, then there's nothing to do if not cluster.support: print_colour( "Support chart has not been deployed. Skipping Grafana dashboards deployment..." ) return grafana_token_file = (config_file_path.parent).joinpath( "enc-grafana-token.secret.yaml" ) # Read the cluster specific secret grafana token file with get_decrypted_file(grafana_token_file) as decrypted_file_path: with open(decrypted_file_path) as f: config = yaml.load(f) # Check GRAFANA_TOKEN exists in the secret config file before continuing if "grafana_token" not in config.keys(): raise ValueError( f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}" ) # FIXME: We assume grafana_url and uses_tls config will be defined in the first # file listed under support.helm_chart_values_files. support_values_file = cluster.
691
25
7,670
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
Unknown
auth
true
function
11
11
false
false
[ "auth", "support", "hubs", "deploy_support", "spec", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.auth(): cluster.deploy_support() def deploy_grafana_dashboards(cluster_name): """ Deploy grafana dashboards to a cluster that provide useful metrics for operating a JupyterHub Grafana dashboards and deployment mechanism in question are maintained in this repo: https://github.com/jupyterhub/grafana-dashboards """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # If grafana support chart is not deployed, then there's nothing to do if not cluster.support: print_colour( "Support chart has not been deployed. Skipping Grafana dashboards deployment..." ) return grafana_token_file = (config_file_path.parent).joinpath( "enc-grafana-token.secret.yaml" ) # Read the cluster specific secret grafana token file with get_decrypted_file(grafana_token_file) as decrypted_file_path: with open(decrypted_file_path) as f: config = yaml.load(f) # Check GRAFANA_TOKEN exists in the secret config file before continuing if "grafana_token" not in config.keys(): raise ValueError( f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}" ) # FIXME: We assume grafana_url and uses_tls config will be defined in the first # file listed under support.helm_chart_values_files. support_values_file = cluster.support.get("helm_chart_values_files", [])[0] with open(config_file_path.parent.joinpath(support_values_file)) as f: support_values_config = yaml.load(f) # Get the url where grafana is running from the support values file grafana_url = ( support_values_config.get("grafana", {}).get("ingress", {}).get("hosts", {}) ) uses_tls = ( support_values_config.get("grafana", {}).get("ingress", {}).get("tls", {}) ) if not grafana_url: print_colour( "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..." ) return grafana_url = ( f"https://{grafana_url[0]}" if uses_tls else f"http://{grafana_url[0]}" ) # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana print_colour("Cloning jupyterhub/grafana-dashboards...") dashboards_dir = "grafana_dashboards" subprocess.check_call( [ "git", "clone", "https://github.com/jupyterhub/grafana-dashboards", dashboards_dir, ] ) # We need the existing env too for the deployer to be able to find jssonnet and grafonnet deploy_env = os.environ.copy() deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]}) try: print_colour(f"Deploying grafana dashboards to {cluster_name}...") subprocess.check_call( ["./deploy.py", grafana_url], env=deploy_env, cwd=dashboards_dir ) print_colour(f"Done! Dashboards deployed to {grafana_url}.") finally: # Delete the directory where we cloned the repo. # The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here. # Might be because opening more than once of a temp file is tried # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile) shutil.rmtree(dashboards_dir) def deploy(cluster_name, hub_name, skip_hub_health_test, config_path): """ Deploy one or more hubs in a given cluster """ validate_cluster_config(cluster_name) validate_hub_config(cluster_name, hub_name) with get_decrypted_file(config_path) as decrypted_file_path: with open(decrypted_file_path) as f: config = yaml.load(f) # All our hubs use Auth0 for Authentication. This lets us programmatically # determine what auth provider each hub uses - GitHub, Google, etc. Without # this, we'd have to manually generate credentials for each hub - and we # don't want to do that. Auth0 domains are tied to a account, and # this is our auth0 domain for the paid account that 2i2c has. auth0 = config["auth0"] k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"]) # Each hub needs a unique proxy.secretToken. However, we don't want # to manually generate & save it. We also don't want it to change with # each deploy - that causes a pod restart with downtime. So instead, # we generate it based on a single secret key (`PROXY_SECRET_KEY`) # combined with the name of each hub. This way, we get unique, # cryptographically secure proxy.secretTokens without having to # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`. # However, if `PROXY_SECRET_KEY` leaks, that means all the hub's # proxy.secretTokens have leaked. So let's be careful with that! SECRET_KEY = bytes.fromhex(config["secret_key"]) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) with cluster.
692
25
7,701
2i2c-org__infrastructure
a417fb77f4ccf22fae09e1bf79e3746eedc1db86
deployer/deploy_actions.py
Unknown
hubs
true
statement
11
11
false
true
[ "hubs", "support", "auth", "deploy_support", "spec", "auth_aws", "auth_azure", "auth_gcp", "auth_kubeconfig", "config_path", "ensure_docker_credhelpers", "__init__", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "auth", "type": "function" }, { "name": "auth_aws", "type": "function" }, { "name": "auth_azure", "type": "function" }, { "name": "auth_gcp", "type": "function" }, { "name": "auth_kubeconfig", "type": "function" }, { "name": "config_path", "type": "statement" }, { "name": "deploy_support", "type": "function" }, { "name": "ensure_docker_credhelpers", "type": "function" }, { "name": "hubs", "type": "statement" }, { "name": "spec", "type": "statement" }, { "name": "support", "type": "statement" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
""" Actions available when deploying many JupyterHubs to many Kubernetes clusters """ import os import shutil import subprocess from pathlib import Path from ruamel.yaml import YAML from auth import KeyProvider from cluster import Cluster from utils import print_colour from file_acquisition import find_absolute_path_to_cluster_file, get_decrypted_file from config_validation import ( validate_cluster_config, validate_hub_config, validate_support_config, ) # Without `pure=True`, I get an exception about str / byte issues yaml = YAML(typ="safe", pure=True) helm_charts_dir = Path(__file__).parent.parent.joinpath("helm-charts") def use_cluster_credentials(cluster_name): """ Quickly gain command-line access to a cluster by updating the current kubeconfig file to include the deployer's access credentials for the named cluster and mark it as the cluster to work against by default. This function is to be used with the `use-cluster-credentials` CLI command only - it is not used by the rest of the deployer codebase. """ validate_cluster_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # Cluster.auth() method has the context manager decorator so cannot call # it like a normal function with cluster.auth(): # This command will spawn a new shell with all the env vars (including # KUBECONFIG) inherited, and once you quit that shell the python program # will resume as usual. # TODO: Figure out how to change the PS1 env var of the spawned shell # to change the prompt to f"cluster-{cluster.spec['name']}". This will # make it visually clear that the user is now operating in a different # shell. subprocess.check_call([os.environ["SHELL"], "-l"]) def deploy_support(cluster_name): """ Deploy support components to a cluster """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) if cluster.support: with cluster.auth(): cluster.deploy_support() def deploy_grafana_dashboards(cluster_name): """ Deploy grafana dashboards to a cluster that provide useful metrics for operating a JupyterHub Grafana dashboards and deployment mechanism in question are maintained in this repo: https://github.com/jupyterhub/grafana-dashboards """ validate_cluster_config(cluster_name) validate_support_config(cluster_name) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) # If grafana support chart is not deployed, then there's nothing to do if not cluster.support: print_colour( "Support chart has not been deployed. Skipping Grafana dashboards deployment..." ) return grafana_token_file = (config_file_path.parent).joinpath( "enc-grafana-token.secret.yaml" ) # Read the cluster specific secret grafana token file with get_decrypted_file(grafana_token_file) as decrypted_file_path: with open(decrypted_file_path) as f: config = yaml.load(f) # Check GRAFANA_TOKEN exists in the secret config file before continuing if "grafana_token" not in config.keys(): raise ValueError( f"`grafana_token` not provided in secret file! Please add it and try again: {grafana_token_file}" ) # FIXME: We assume grafana_url and uses_tls config will be defined in the first # file listed under support.helm_chart_values_files. support_values_file = cluster.support.get("helm_chart_values_files", [])[0] with open(config_file_path.parent.joinpath(support_values_file)) as f: support_values_config = yaml.load(f) # Get the url where grafana is running from the support values file grafana_url = ( support_values_config.get("grafana", {}).get("ingress", {}).get("hosts", {}) ) uses_tls = ( support_values_config.get("grafana", {}).get("ingress", {}).get("tls", {}) ) if not grafana_url: print_colour( "Couldn't find `config.grafana.ingress.hosts`. Skipping Grafana dashboards deployment..." ) return grafana_url = ( f"https://{grafana_url[0]}" if uses_tls else f"http://{grafana_url[0]}" ) # Use the jupyterhub/grafana-dashboards deployer to deploy the dashboards to this cluster's grafana print_colour("Cloning jupyterhub/grafana-dashboards...") dashboards_dir = "grafana_dashboards" subprocess.check_call( [ "git", "clone", "https://github.com/jupyterhub/grafana-dashboards", dashboards_dir, ] ) # We need the existing env too for the deployer to be able to find jssonnet and grafonnet deploy_env = os.environ.copy() deploy_env.update({"GRAFANA_TOKEN": config["grafana_token"]}) try: print_colour(f"Deploying grafana dashboards to {cluster_name}...") subprocess.check_call( ["./deploy.py", grafana_url], env=deploy_env, cwd=dashboards_dir ) print_colour(f"Done! Dashboards deployed to {grafana_url}.") finally: # Delete the directory where we cloned the repo. # The deployer cannot call jsonnet to deploy the dashboards if using a temp directory here. # Might be because opening more than once of a temp file is tried # (https://docs.python.org/3.8/library/tempfile.html#tempfile.NamedTemporaryFile) shutil.rmtree(dashboards_dir) def deploy(cluster_name, hub_name, skip_hub_health_test, config_path): """ Deploy one or more hubs in a given cluster """ validate_cluster_config(cluster_name) validate_hub_config(cluster_name, hub_name) with get_decrypted_file(config_path) as decrypted_file_path: with open(decrypted_file_path) as f: config = yaml.load(f) # All our hubs use Auth0 for Authentication. This lets us programmatically # determine what auth provider each hub uses - GitHub, Google, etc. Without # this, we'd have to manually generate credentials for each hub - and we # don't want to do that. Auth0 domains are tied to a account, and # this is our auth0 domain for the paid account that 2i2c has. auth0 = config["auth0"] k = KeyProvider(auth0["domain"], auth0["client_id"], auth0["client_secret"]) # Each hub needs a unique proxy.secretToken. However, we don't want # to manually generate & save it. We also don't want it to change with # each deploy - that causes a pod restart with downtime. So instead, # we generate it based on a single secret key (`PROXY_SECRET_KEY`) # combined with the name of each hub. This way, we get unique, # cryptographically secure proxy.secretTokens without having to # keep much state. We can rotate them by changing `PROXY_SECRET_KEY`. # However, if `PROXY_SECRET_KEY` leaks, that means all the hub's # proxy.secretTokens have leaked. So let's be careful with that! SECRET_KEY = bytes.fromhex(config["secret_key"]) config_file_path = find_absolute_path_to_cluster_file(cluster_name) with open(config_file_path) as f: cluster = Cluster(yaml.load(f), config_file_path.parent) with cluster.auth(): hubs = cluster.
694
27
1,038
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_console_handler
true
statement
18
18
false
true
[ "typing_console_handler", "json_logger", "logger", "typing_logger", "chat_plugins", "__init__", "_log", "console_handler", "debug", "double_check", "error", "file_handler", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.
695
27
1,097
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_console_handler
true
statement
18
18
false
false
[ "typing_console_handler", "json_logger", "logger", "typing_logger", "chat_plugins", "__init__", "_log", "console_handler", "debug", "double_check", "error", "file_handler", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.
697
27
1,279
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
console_handler
true
statement
18
18
false
true
[ "console_handler", "json_logger", "logger", "typing_console_handler", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "file_handler", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.
698
27
1,332
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
console_handler
true
statement
18
18
false
false
[ "console_handler", "json_logger", "logger", "typing_console_handler", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "file_handler", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.
700
27
1,550
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
file_handler
true
statement
18
18
false
true
[ "console_handler", "file_handler", "json_logger", "typing_console_handler", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "logger", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.
701
27
1,722
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
file_handler
true
statement
18
18
false
false
[ "console_handler", "file_handler", "json_logger", "typing_console_handler", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "logger", "set_level", "speak_mode", "typewriter_log", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.
703
27
2,255
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_logger
true
statement
18
18
false
true
[ "typing_logger", "console_handler", "file_handler", "json_logger", "logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.
704
27
2,285
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_console_handler
true
statement
18
18
false
false
[ "file_handler", "logger", "console_handler", "json_logger", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.
705
27
2,322
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_logger
true
statement
18
18
false
false
[ "typing_logger", "console_handler", "file_handler", "json_logger", "logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.
706
27
2,352
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
file_handler
true
statement
18
18
false
false
[ "file_handler", "logger", "console_handler", "json_logger", "typing_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.
707
27
2,379
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_logger
true
statement
18
18
false
false
[ "typing_logger", "logger", "console_handler", "file_handler", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.
708
27
2,432
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
typing_logger
true
statement
18
18
false
false
[ "typing_logger", "logger", "console_handler", "file_handler", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.
710
27
2,534
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
logger
true
statement
18
18
false
true
[ "logger", "typing_logger", "console_handler", "file_handler", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.
711
27
2,557
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
console_handler
true
statement
18
18
false
false
[ "file_handler", "typing_logger", "logger", "json_logger", "console_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.
712
27
2,587
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
logger
true
statement
18
18
false
false
[ "logger", "typing_logger", "console_handler", "file_handler", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.
713
27
2,610
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
file_handler
true
statement
18
18
false
false
[ "file_handler", "typing_logger", "console_handler", "logger", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.
714
27
2,637
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
logger
true
statement
18
18
false
false
[ "logger", "typing_logger", "json_logger", "console_handler", "file_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.
715
27
2,683
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
logger
true
statement
18
18
false
false
[ "logger", "typing_logger", "json_logger", "console_handler", "file_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.
717
27
2,788
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
json_logger
true
statement
18
18
false
true
[ "json_logger", "logger", "typing_logger", "console_handler", "file_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.
718
27
2,816
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
file_handler
true
statement
18
18
false
false
[ "logger", "file_handler", "typing_logger", "console_handler", "json_logger", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.
719
27
2,843
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
json_logger
true
statement
18
18
false
false
[ "json_logger", "logger", "typing_logger", "console_handler", "file_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.
720
27
2,894
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
json_logger
true
statement
18
18
false
false
[ "json_logger", "logger", "typing_logger", "console_handler", "file_handler", "__init__", "_log", "chat_plugins", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.
723
27
3,150
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
speak_mode
true
statement
18
18
false
true
[ "speak_mode", "logger", "json_logger", "typing_logger", "file_handler", "__init__", "_log", "chat_plugins", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.
724
27
3,234
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
Unknown
chat_plugins
true
statement
18
18
false
true
[ "chat_plugins", "logger", "json_logger", "typing_logger", "file_handler", "__init__", "_log", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.
725
27
3,457
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
random
typing_logger
true
statement
18
18
false
false
[ "logger", "json_logger", "typing_logger", "file_handler", "chat_plugins", "__init__", "_log", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.chat_plugins: plugin.report(f"{title}. {content}") if content: if isinstance(content, list): content = " ".join(content) else: content = "" self.
726
27
3,684
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
infile
_log
true
function
18
18
true
true
[ "logger", "json_logger", "typing_logger", "_log", "file_handler", "__init__", "chat_plugins", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.chat_plugins: plugin.report(f"{title}. {content}") if content: if isinstance(content, list): content = " ".join(content) else: content = "" self.typing_logger.log( level, content, extra={"title": title, "color": title_color} ) def debug( self, message, title="", title_color="", ): self.
727
27
3,857
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
infile
_log
true
function
18
18
true
false
[ "logger", "json_logger", "typing_logger", "_log", "file_handler", "__init__", "chat_plugins", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.chat_plugins: plugin.report(f"{title}. {content}") if content: if isinstance(content, list): content = " ".join(content) else: content = "" self.typing_logger.log( level, content, extra={"title": title, "color": title_color} ) def debug( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.DEBUG) def info( self, message, title="", title_color="", ): self.
728
27
4,029
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
infile
_log
true
function
18
18
true
false
[ "logger", "json_logger", "typing_logger", "_log", "file_handler", "__init__", "chat_plugins", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.chat_plugins: plugin.report(f"{title}. {content}") if content: if isinstance(content, list): content = " ".join(content) else: content = "" self.typing_logger.log( level, content, extra={"title": title, "color": title_color} ) def debug( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.DEBUG) def info( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.INFO) def warn( self, message, title="", title_color="", ): self.
729
27
4,131
eosphoros-ai__DB-GPT
f684a7f6f06920e0054501c253fe0048ed36d058
pilot/logs.py
infile
_log
true
function
18
18
true
false
[ "logger", "json_logger", "typing_logger", "file_handler", "_log", "__init__", "chat_plugins", "console_handler", "debug", "double_check", "error", "get_log_directory", "info", "log_json", "set_level", "speak_mode", "typewriter_log", "typing_console_handler", "warn", "__annotations__", "__class__", "__delattr__", "__dict__", "__dir__", "__eq__", "__format__", "__getattribute__", "__getstate__", "__hash__", "__init_subclass__", "__ne__", "__new__", "__reduce__", "__reduce_ex__", "__repr__", "__setattr__", "__sizeof__", "__str__", "__subclasshook__", "__doc__", "__module__" ]
[ { "name": "chat_plugins", "type": "statement" }, { "name": "console_handler", "type": "statement" }, { "name": "debug", "type": "function" }, { "name": "double_check", "type": "function" }, { "name": "error", "type": "function" }, { "name": "file_handler", "type": "statement" }, { "name": "get_log_directory", "type": "function" }, { "name": "info", "type": "function" }, { "name": "json_logger", "type": "statement" }, { "name": "log_json", "type": "function" }, { "name": "logger", "type": "statement" }, { "name": "set_level", "type": "function" }, { "name": "speak_mode", "type": "statement" }, { "name": "typewriter_log", "type": "function" }, { "name": "typing_console_handler", "type": "statement" }, { "name": "typing_logger", "type": "statement" }, { "name": "warn", "type": "function" }, { "name": "_log", "type": "function" }, { "name": "__annotations__", "type": "statement" }, { "name": "__class__", "type": "property" }, { "name": "__delattr__", "type": "function" }, { "name": "__dict__", "type": "statement" }, { "name": "__dir__", "type": "function" }, { "name": "__doc__", "type": "statement" }, { "name": "__eq__", "type": "function" }, { "name": "__format__", "type": "function" }, { "name": "__getattribute__", "type": "function" }, { "name": "__hash__", "type": "function" }, { "name": "__init__", "type": "function" }, { "name": "__init_subclass__", "type": "function" }, { "name": "__module__", "type": "statement" }, { "name": "__ne__", "type": "function" }, { "name": "__new__", "type": "function" }, { "name": "__reduce__", "type": "function" }, { "name": "__reduce_ex__", "type": "function" }, { "name": "__repr__", "type": "function" }, { "name": "__setattr__", "type": "function" }, { "name": "__sizeof__", "type": "function" }, { "name": "__slots__", "type": "statement" }, { "name": "__str__", "type": "function" } ]
import logging import os import random import re import time from logging import LogRecord from typing import Any from colorama import Fore, Style from pilot.log.json_handler import JsonFileHandler, JsonFormatter from pilot.singleton import Singleton from pilot.speech import say_text class Logger(metaclass=Singleton): """ Logger that handle titles in different colors. Outputs logs in console, activity.log, and errors.log For console handler: simulates typing """ def __init__(self): # create log directory if it doesn't exist this_files_dir_path = os.path.dirname(__file__) log_dir = os.path.join(this_files_dir_path, "../logs") if not os.path.exists(log_dir): os.makedirs(log_dir) log_file = "activity.log" error_file = "error.log" console_formatter = DbGptFormatter("%(title_color)s %(message)s") # Create a handler for console which simulate typing self.typing_console_handler = TypingConsoleHandler() self.typing_console_handler.setLevel(logging.INFO) self.typing_console_handler.setFormatter(console_formatter) # Create a handler for console without typing simulation self.console_handler = ConsoleHandler() self.console_handler.setLevel(logging.DEBUG) self.console_handler.setFormatter(console_formatter) # Info handler in activity.log self.file_handler = logging.FileHandler( os.path.join(log_dir, log_file), "a", "utf-8" ) self.file_handler.setLevel(logging.DEBUG) info_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(title)s %(message_no_color)s" ) self.file_handler.setFormatter(info_formatter) # Error handler error.log error_handler = logging.FileHandler( os.path.join(log_dir, error_file), "a", "utf-8" ) error_handler.setLevel(logging.ERROR) error_formatter = DbGptFormatter( "%(asctime)s %(levelname)s %(module)s:%(funcName)s:%(lineno)d %(title)s" " %(message_no_color)s" ) error_handler.setFormatter(error_formatter) self.typing_logger = logging.getLogger("TYPER") self.typing_logger.addHandler(self.typing_console_handler) self.typing_logger.addHandler(self.file_handler) self.typing_logger.addHandler(error_handler) self.typing_logger.setLevel(logging.DEBUG) self.logger = logging.getLogger("LOGGER") self.logger.addHandler(self.console_handler) self.logger.addHandler(self.file_handler) self.logger.addHandler(error_handler) self.logger.setLevel(logging.DEBUG) self.json_logger = logging.getLogger("JSON_LOGGER") self.json_logger.addHandler(self.file_handler) self.json_logger.addHandler(error_handler) self.json_logger.setLevel(logging.DEBUG) self.speak_mode = False self.chat_plugins = [] def typewriter_log( self, title="", title_color="", content="", speak_text=False, level=logging.INFO ): if speak_text and self.speak_mode: say_text(f"{title}. {content}") for plugin in self.chat_plugins: plugin.report(f"{title}. {content}") if content: if isinstance(content, list): content = " ".join(content) else: content = "" self.typing_logger.log( level, content, extra={"title": title, "color": title_color} ) def debug( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.DEBUG) def info( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.INFO) def warn( self, message, title="", title_color="", ): self._log(title, title_color, message, logging.WARN) def error(self, title, message=""): self.